From 5ec94ece9db4b897a162d313d1aa81a713738354 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Thu, 12 Sep 2024 09:00:21 +0000 Subject: [PATCH 01/24] stubs --- src/aks-preview/azext_aks_preview/_params.py | 9 ++++++ src/aks-preview/azext_aks_preview/custom.py | 4 +++ .../managed_cluster_decorator.py | 29 ++++++++++++++++++- .../azext_dataprotection/manual/_params.py | 9 +++++- .../manual/aks/akshelper.py | 9 ++++++ .../azext_dataprotection/manual/commands.py | 6 ++++ .../azext_dataprotection/manual/custom.py | 7 +++++ .../azext_dataprotection/manual/enums.py | 10 +++++++ .../test_dataprotection_backup_policy.py | 3 ++ 9 files changed, 84 insertions(+), 2 deletions(-) create mode 100644 src/dataprotection/azext_dataprotection/manual/aks/akshelper.py diff --git a/src/aks-preview/azext_aks_preview/_params.py b/src/aks-preview/azext_aks_preview/_params.py index a6e60c7c5b3..0c276a55e37 100644 --- a/src/aks-preview/azext_aks_preview/_params.py +++ b/src/aks-preview/azext_aks_preview/_params.py @@ -23,6 +23,11 @@ validate_nat_gateway_idle_timeout, validate_nat_gateway_managed_outbound_ip_count, ) + +from azext_dataprotection.manual.enums import ( + backup_presets +) + from azure.cli.core.commands.parameters import ( edge_zone_type, file_type, @@ -129,6 +134,7 @@ CONST_TLS_MANAGEMENT_MANAGED, CONST_TLS_MANAGEMENT_NONE, ) +from azure.cli.core.commands.validators import validate_file_or_dict from azext_aks_preview._validators import ( validate_acr, validate_addon, @@ -1415,6 +1421,9 @@ def load_arguments(self, _): 'For more information on "Auto" mode see aka.ms/aks/nap.' ) ) + c.argument("enable_backup", help="Enable backup for the cluster", is_preview=True, action="store_true") + c.argument("backup_strategy", arg_type=get_enum_type(backup_presets), help="Backup strategy for the cluster. 
Defaults to Recommended.", is_preview=True) + c.argument("backup_configuration_parameters", type=validate_file_or_dict, help="Backup configuration overrides.", is_preview=True) # In update scenario, use emtpy str as default. c.argument('ssh_access', arg_type=get_enum_type(ssh_accesses), is_preview=True) c.argument('enable_static_egress_gateway', is_preview=True, action='store_true') diff --git a/src/aks-preview/azext_aks_preview/custom.py b/src/aks-preview/azext_aks_preview/custom.py index c53f6a4a87a..7f341880717 100644 --- a/src/aks-preview/azext_aks_preview/custom.py +++ b/src/aks-preview/azext_aks_preview/custom.py @@ -871,6 +871,10 @@ def aks_update( # IMDS restriction enable_imds_restriction=False, disable_imds_restriction=False, + # Backup + enable_backup=False, + backup_strategy=None, + backup_configuration_parameters=None, ): # DO NOT MOVE: get all the original parameters and save them as a dictionary raw_parameters = locals() diff --git a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py index be1b3662cef..586538a16d8 100644 --- a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py +++ b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py @@ -6,6 +6,7 @@ # pylint: disable=too-many-lines import copy import datetime +import json import os from types import SimpleNamespace from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union @@ -5456,8 +5457,34 @@ def update_mc_profile_preview(self) -> ManagedCluster: # update static egress gateway mc = self.update_static_egress_gateway(mc) # update imds restriction - mc = self.update_imds_restriction(mc) + mc = self.update_imds_restriction(mc) + # set up backup + mc = self.set_up_backup(mc) + + + return mc + + def set_up_backup(self, mc: ManagedCluster) -> ManagedCluster: + + enable_backup = self.context.raw_param.get("enable_backup") + if enable_backup: + from azext_dataprotection.manual.enums import 
backup_presets + + backup_strategy = self.context.raw_param.get("backup_strategy") + backup_configuration_parameters = self.context.raw_param.get("backup_configuration_parameters") + from msrestazure.tools import resource_id + + cluster_resource_id = resource_id( + subscription=self.context.get_subscription_id(), + resource_group=self.context.get_resource_group_name(), + namespace="Microsoft.ContainerService", + type="managedClusters", + name=self.context.get_name(), + ) + + from azext_dataprotection.manual.aks.akshelper import dataprotection_enable_backup_helper + dataprotection_enable_backup_helper(str(cluster_resource_id), json.dumps(backup_strategy), json.dumps(backup_configuration_parameters)) return mc def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool: diff --git a/src/dataprotection/azext_dataprotection/manual/_params.py b/src/dataprotection/azext_dataprotection/manual/_params.py index ac58b8c3055..c8c13e52dbd 100644 --- a/src/dataprotection/azext_dataprotection/manual/_params.py +++ b/src/dataprotection/azext_dataprotection/manual/_params.py @@ -42,7 +42,8 @@ get_permission_scope_values, get_resource_type_values, get_persistent_volume_restore_mode_values, - get_conflict_policy_values + get_conflict_policy_values, + backup_presets, ) vault_name_type = CLIArgumentType(help='Name of the backup vault.', options_list=['--vault-name', '-v'], type=str) @@ -161,6 +162,12 @@ def load_arguments(self, _): 'json-string/@json-file. Required when --operation is Backup') c.argument('restore_request_object', type=validate_file_or_dict, help='Request body for operation "Restore" Expected value: ' 'json-string/@json-file. 
Required when --operation is Restore') + + ## dataprotection enable-backup + with self.argument_context('dataprotection enable-backup') as c: + c.argument('datasource_uri', type=str, help="The URI of the datasource to be backed up.") + c.argument("backup_strategy", arg_type=get_enum_type(backup_presets), help="Backup strategy for the cluster. Defaults to Recommended.") + c.argument('configuration_parameters', type=validate_file_or_dict, help="Workload specific configuration overrides.") with self.argument_context('dataprotection job show') as c: c.argument('resource_group_name', resource_group_name_type) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py b/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py new file mode 100644 index 00000000000..559e846acf7 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py @@ -0,0 +1,9 @@ +import json +from src.dataprotection.azext_dataprotection.manual.enums import CONST_RECOMMENDED + +def dataprotection_enable_backup_helper(datasource_uri: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): + # Do GET on exten + print("Do GET on extension") + print("datasourceUri: " + datasource_uri) + print("backupStrategy: " + backup_strategy) + print ("configurationParams: " + json.dumps(configuration_params)) diff --git a/src/dataprotection/azext_dataprotection/manual/commands.py b/src/dataprotection/azext_dataprotection/manual/commands.py index 10bb6660612..1a14634f256 100644 --- a/src/dataprotection/azext_dataprotection/manual/commands.py +++ b/src/dataprotection/azext_dataprotection/manual/commands.py @@ -122,3 +122,9 @@ def load_command_table(self, _): with self.command_group('dataprotection recovery-point', exception_handler=exception_handler) as g: g.custom_command('list', 'dataprotection_recovery_point_list') + + with self.command_group('dataprotection enable-backup', exception_handler=exception_handler) as g: + g.custom_command('initialize', 
'dataprotection_enable_backup', supports_no_wait=False) + g.custom_command('fix', 'dataprotection_enable_backup', supports_no_wait=True) + g.custom_command('debug', 'dataprotection_enable_backup', supports_no_wait=True) + \ No newline at end of file diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index 03a19c9e3ae..1428fb3c2ff 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -13,6 +13,7 @@ # pylint: disable=too-many-nested-blocks # pylint: disable=no-else-continue # pylint: disable=no-else-raise +import json import time from azure.cli.core.azclierror import ( RequiredArgumentMissingError, @@ -31,6 +32,7 @@ QueryRequest, QueryRequestOptions from azext_dataprotection.manual import backupcenter_helper, helpers as helper from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Show as BackupVaultGet +from src.dataprotection.azext_dataprotection.manual.enums import CONST_RECOMMENDED logger = get_logger(__name__) @@ -1015,3 +1017,8 @@ def restore_initialize_for_item_recovery(cmd, datasource_type, source_datastore, vaulted_blob_prefix_pattern) return restore_request + +def dataprotection_enable_backup(cmd, datasource_uri: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): + + from azext_dataprotection.manual.aks.akshelper import dataprotection_enable_backup_helper + dataprotection_enable_backup_helper( datasource_uri, backup_strategy, configuration_params) diff --git a/src/dataprotection/azext_dataprotection/manual/enums.py b/src/dataprotection/azext_dataprotection/manual/enums.py index d522eb6ad57..ed2af569414 100644 --- a/src/dataprotection/azext_dataprotection/manual/enums.py +++ b/src/dataprotection/azext_dataprotection/manual/enums.py @@ -92,3 +92,13 @@ def get_persistent_volume_restore_mode_values(): def get_conflict_policy_values(): return ['Skip', 'Patch'] + 
+CONST_RECOMMENDED = 'Recommended' +CONST_DEFAULT = 'Default' +CONST_DAILY = 'Daily' + +backup_presets = [ + CONST_RECOMMENDED, + CONST_DEFAULT, + CONST_DAILY +] \ No newline at end of file diff --git a/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_backup_policy.py b/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_backup_policy.py index 3c35a3c1501..872b087e57f 100644 --- a/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_backup_policy.py +++ b/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_backup_policy.py @@ -132,3 +132,6 @@ def test_dataprotection_backup_policy_generic_criteria(test): 'JANUARY February MarCh april May June July August September October November December', checks=[ test.check('length(months_of_year)', 12) ]) + + def test_dataprotection_enable_backup(test): + test.cmd('az dataprotection enable-backup enable-backup') \ No newline at end of file From f0c0c5e6c5608f37ca635d74cb05471d6b904b4f Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Tue, 17 Sep 2024 11:14:26 +0000 Subject: [PATCH 02/24] draft changes --- .../manual/aks/akshelper.py | 46 ++++++++++++++++++- .../azext_dataprotection/manual/custom.py | 11 +++-- 2 files changed, 53 insertions(+), 4 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py b/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py index 559e846acf7..fed049bbd6e 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py @@ -1,9 +1,53 @@ import json from src.dataprotection.azext_dataprotection.manual.enums import CONST_RECOMMENDED +from azure.identity import DefaultAzureCredential +from azure.mgmt.kubernetesconfiguration import SourceControlConfigurationClient -def dataprotection_enable_backup_helper(datasource_uri: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): + +def 
dataprotection_enable_backup_helper(cli_ctx, datasource_uri: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): # Do GET on exten print("Do GET on extension") print("datasourceUri: " + datasource_uri) print("backupStrategy: " + backup_strategy) print ("configurationParams: " + json.dumps(configuration_params)) + + # extract subscriptoin ID from datasource_uri + clusterSubscriptionId = datasource_uri.split('/')[2] + clusterResourceGroup = datasource_uri.split('/')[4] + clusterName = datasource_uri.split('/')[8] + + extension = __get_extension(cli_ctx, subscription_id=clusterSubscriptionId, resource_group=clusterResourceGroup, cluster_name=clusterName) + +def __get_extension(cli_ctx, subscription_id, resource_group, cluster_name): + # https://learn.microsoft.com/en-us/python/api/overview/azure/mgmt-kubernetesconfiguration-readme?view=azure-python + # use cli_ctx to make credential object + # client = SourceControlConfigurationClient(credential=DefaultAzureCredential) + # client = SourceControlConfigurationClient(credential=DefaultAzureCredential(), subscription_id=subscription_id) + # extensions = client.extensions.list(cluster_rp="Microsoft.ContainerService", cluster_resource_name="ManagedClusters", resource_group_name=resource_group, cluster_name=cluster_name) # iterate and find the extension of type dataprotection.microsoft + # seriliaze cli_ctx and print + print(cli_ctx.) + # if len(extensions) == 0: + # print("No extensions found") + # else: + # for extension in extensions: + # if extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': + # print("Extension found: " + extension.name) + # + # Check if there is an SA in cluster RG with azure tag - clusterName = backup + # If not, create one with name bkp- (4) (4) sha256 of cluster URI + # + # + # + # P2 - Using Extension routing, if there is a BI already for the cluster. If there, is print the vault name where it resides. 
(this can be the very first step) + # + # + # Check if there is a backupvault in the subscription with tag, default=true + # + # If there is no such backup vault, create a resource group with tag backup-resource-group=true + # Create a backup vault in the resource group with tag default=true + # + # Check if the Vault has a policy with params matching Recommended Policy params + # + # + # + # diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index 1428fb3c2ff..9fec9153481 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -1019,6 +1019,11 @@ def restore_initialize_for_item_recovery(cmd, datasource_type, source_datastore, return restore_request def dataprotection_enable_backup(cmd, datasource_uri: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): - - from azext_dataprotection.manual.aks.akshelper import dataprotection_enable_backup_helper - dataprotection_enable_backup_helper( datasource_uri, backup_strategy, configuration_params) + + # if uri contains case insensitive Microsoft.ContainerService/managedClusters contains and add if check + if "Microsoft.ContainerService/managedClusters".lower() in datasource_uri.lower(): + from azext_dataprotection.manual.aks.akshelper import dataprotection_enable_backup_helper + dataprotection_enable_backup_helper(cmd.cli_ctx, datasource_uri, backup_strategy, configuration_params) + return + else: + raise InvalidArgumentValueError("Unsupported datasource type for command") From 3f93d8e15c3eb26ac7b6986a6b93da6bf2dbe408 Mon Sep 17 00:00:00 2001 From: Rishabh Raj Date: Thu, 19 Sep 2024 16:41:23 +0530 Subject: [PATCH 03/24] Update README.md test --- src/acat/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/acat/README.md b/src/acat/README.md index 55cd2f2e7f6..dd62d907731 100644 --- a/src/acat/README.md +++ 
b/src/acat/README.md @@ -1,3 +1,4 @@ +Test # Azure CLI Acat Extension # This is an extension to Azure CLI to manage Acat resources. @@ -69,4 +70,4 @@ az acat report webhook create ` # check if the webhook is configured correctly az acat report webhook list --report-name $reportName | ConvertFrom-Json az acat report webhook show --report-name $reportName --webhook-name $hookName -``` \ No newline at end of file +``` From bfc9dd48d5d267954aec1e09cd591ed811c3f3b5 Mon Sep 17 00:00:00 2001 From: "FAREAST\\rishraj" Date: Fri, 20 Sep 2024 10:49:15 +0530 Subject: [PATCH 04/24] Create backup extension --- .../manual/aks/akshelper.py | 124 ++++++++++++++---- .../azext_dataprotection/manual/custom.py | 2 +- 2 files changed, 101 insertions(+), 25 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py b/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py index fed049bbd6e..2e0dd42c598 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py @@ -1,10 +1,8 @@ import json from src.dataprotection.azext_dataprotection.manual.enums import CONST_RECOMMENDED -from azure.identity import DefaultAzureCredential -from azure.mgmt.kubernetesconfiguration import SourceControlConfigurationClient +from azure.cli.core.commands.client_factory import get_mgmt_service_client - -def dataprotection_enable_backup_helper(cli_ctx, datasource_uri: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): +def dataprotection_enable_backup_helper(cmd, datasource_uri: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): # Do GET on exten print("Do GET on extension") print("datasourceUri: " + datasource_uri) @@ -12,26 +10,104 @@ def dataprotection_enable_backup_helper(cli_ctx, datasource_uri: str, backup_str print ("configurationParams: " + json.dumps(configuration_params)) # extract subscriptoin ID from datasource_uri - clusterSubscriptionId = 
datasource_uri.split('/')[2] - clusterResourceGroup = datasource_uri.split('/')[4] - clusterName = datasource_uri.split('/')[8] - - extension = __get_extension(cli_ctx, subscription_id=clusterSubscriptionId, resource_group=clusterResourceGroup, cluster_name=clusterName) - -def __get_extension(cli_ctx, subscription_id, resource_group, cluster_name): - # https://learn.microsoft.com/en-us/python/api/overview/azure/mgmt-kubernetesconfiguration-readme?view=azure-python - # use cli_ctx to make credential object - # client = SourceControlConfigurationClient(credential=DefaultAzureCredential) - # client = SourceControlConfigurationClient(credential=DefaultAzureCredential(), subscription_id=subscription_id) - # extensions = client.extensions.list(cluster_rp="Microsoft.ContainerService", cluster_resource_name="ManagedClusters", resource_group_name=resource_group, cluster_name=cluster_name) # iterate and find the extension of type dataprotection.microsoft - # seriliaze cli_ctx and print - print(cli_ctx.) 
- # if len(extensions) == 0: - # print("No extensions found") - # else: - # for extension in extensions: - # if extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': - # print("Extension found: " + extension.name) + cluster_subscription_id = datasource_uri.split('/')[2] + cluster_resource_group_name = datasource_uri.split('/')[4] + cluster_name = datasource_uri.split('/')[8] + + storage_account_subscription_id = "f0c630e0-2995-4853-b056-0b3c09cb673f" + storage_account_resource_group_name = "rg2eacanrraj" + storage_account_name = "tinysarraj" + storage_account_container_name = "container" + + backup_vault_subscription_id = "f0c630e0-2995-4853-b056-0b3c09cb673f" + backup_vault_resource_group = "rgwerraj" + backup_vault_name = "vaultwerraj" + + + """ + - Create backup vault and policy in the cluster resource group* + - Create backup resource group + - Create backup storage account and container + - Create backup extension + - Create trusted access role binding + - Assign all permissions + - Create backup instance + """ + + storage_account_arm_id = __generate_arm_id(storage_account_subscription_id, storage_account_resource_group_name, "Microsoft.Storage/storageAccounts", storage_account_name) + + # backup_extension = __create_backup_extension(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name) + from azure.cli.command_modules.role.custom import list_role_assignments, create_role_assignment + + create_role_assignment( + cmd, + assignee="e433a43c-9667-4e6a-9f73-8213565eb49e", + # assignee=backup_extension.aks_assigned_identity.principal_id, + role="Storage Blob Data Contributor", + scope=storage_account_arm_id) + + + +# Example usage +# __create_backup_storage_account_and_container(cli_ctx, "your_subscription_id", "your_resource_group_name", "your_storage_account_name", "your_container_name", "eastus") + + +# Example usage +# __create_resource_group(cli_ctx, "your_subscription_id", "your_resource_group_name", "eastus") + +def 
__generate_arm_id(subscription_id, resource_group_name, resource_type, resource_name): + return f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/{resource_type}/{resource_name}" + +def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster_name): + from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration import SourceControlConfigurationClient + from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration.v2023_05_01.models import Extension + + k8s_configuration_client = get_mgmt_service_client(cmd.cli_ctx, SourceControlConfigurationClient, subscription_id=subscription_id) + + extensions = k8s_configuration_client.extensions.list( + cluster_rp="Microsoft.ContainerService", + cluster_resource_name="managedClusters", + resource_group_name=resource_group_name, + cluster_name=cluster_name) + + for page in extensions.by_page(): + for extension in page: + if extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': + print("Extension found: " + extension.name) + break + + print("Creating backup extension...") + + from azure.cli.core.extension.operations import add_extension_to_path + from importlib import import_module + add_extension_to_path("k8s-extension") + K8s_extension_client_factory = import_module("azext_k8s_extension._client_factory") + k8s_extension_module = import_module("azext_k8s_extension.custom") + + # return k8s_extension_module.create_k8s_extension( + # cmd=cmd, + # client=K8s_extension_client_factory.cf_k8s_extension_operation(cmd.cli_ctx), + # resource_group_name=resource_group_name, + # cluster_name=cluster_name, + # name="azure-aks-backup", + # cluster_type="managedClusters", + # extension_type="microsoft.dataprotection.kubernetes", + # cluster_resource_provider="Microsoft.ContainerService", + # scope="cluster", + # auto_upgrade_minor_version=True, + # release_train="stable", + # configuration_settings=[{ + # "blobContainer": "container", + # 
"storageAccount": "tinysarraj", + # "storageAccountResourceGroup": "rg2eacanrraj", + # "storageAccountSubscriptionId": "f0c630e0-2995-4853-b056-0b3c09cb673f" + # }] + # ).result() + + + # print(response) + # print(response.result()) + # # Check if there is an SA in cluster RG with azure tag - clusterName = backup # If not, create one with name bkp- (4) (4) sha256 of cluster URI diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index 9fec9153481..1d6ced09bd6 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -1023,7 +1023,7 @@ def dataprotection_enable_backup(cmd, datasource_uri: str, backup_strategy=CONST # if uri contains case insensitive Microsoft.ContainerService/managedClusters contains and add if check if "Microsoft.ContainerService/managedClusters".lower() in datasource_uri.lower(): from azext_dataprotection.manual.aks.akshelper import dataprotection_enable_backup_helper - dataprotection_enable_backup_helper(cmd.cli_ctx, datasource_uri, backup_strategy, configuration_params) + dataprotection_enable_backup_helper(cmd, datasource_uri, backup_strategy, configuration_params) return else: raise InvalidArgumentValueError("Unsupported datasource type for command") From 8fdce196c81153c82e810db9c76ceedd4e0fc93a Mon Sep 17 00:00:00 2001 From: "FAREAST\\rishraj" Date: Sat, 21 Sep 2024 23:57:23 +0530 Subject: [PATCH 05/24] Prototype for one command data protection --- .../managed_cluster_decorator.py | 2 +- .../manual/aks/aks_helper.py | 322 + .../manual/aks/akshelper.py | 129 - .../azext_dataprotection/manual/custom.py | 2 +- .../azure_mgmt_containerservice/__init__.py | 20 + .../_configuration.py | 66 + .../_container_service_client.py | 2655 ++++++ .../_serialization.py | 2006 ++++ .../azure_mgmt_containerservice/_version.py | 8 + .../aio/__init__.py | 10 + .../aio/_configuration.py | 66 + 
.../aio/_container_service_client.py | 2655 ++++++ .../azure_mgmt_containerservice/models.py | 11 + .../azure_mgmt_containerservice/py.typed | 1 + .../v2024_07_01/__init__.py | 26 + .../v2024_07_01/_configuration.py | 65 + .../v2024_07_01/_container_service_client.py | 179 + .../v2024_07_01/_patch.py | 20 + .../v2024_07_01/_version.py | 9 + .../v2024_07_01/aio/__init__.py | 23 + .../v2024_07_01/aio/_configuration.py | 65 + .../aio/_container_service_client.py | 182 + .../v2024_07_01/aio/_patch.py | 20 + .../v2024_07_01/aio/operations/__init__.py | 39 + .../aio/operations/_agent_pools_operations.py | 1134 +++ .../aio/operations/_machines_operations.py | 212 + .../_maintenance_configurations_operations.py | 418 + .../_managed_clusters_operations.py | 2822 ++++++ .../v2024_07_01/aio/operations/_operations.py | 133 + .../v2024_07_01/aio/operations/_patch.py | 20 + ...private_endpoint_connections_operations.py | 446 + .../_private_link_resources_operations.py | 115 + ...olve_private_link_service_id_operations.py | 193 + .../aio/operations/_snapshots_operations.py | 608 ++ ...trusted_access_role_bindings_operations.py | 549 ++ .../_trusted_access_roles_operations.py | 137 + .../v2024_07_01/models/__init__.py | 407 + .../models/_container_service_client_enums.py | 563 ++ .../v2024_07_01/models/_models_py3.py | 8231 +++++++++++++++++ .../v2024_07_01/models/_patch.py | 20 + .../v2024_07_01/operations/__init__.py | 39 + .../operations/_agent_pools_operations.py | 1508 +++ .../operations/_machines_operations.py | 307 + .../_maintenance_configurations_operations.py | 581 ++ .../_managed_clusters_operations.py | 3832 ++++++++ .../v2024_07_01/operations/_operations.py | 155 + .../v2024_07_01/operations/_patch.py | 20 + ...private_endpoint_connections_operations.py | 628 ++ .../_private_link_resources_operations.py | 158 + ...olve_private_link_service_id_operations.py | 239 + .../operations/_snapshots_operations.py | 819 ++ ...trusted_access_role_bindings_operations.py | 745 ++ 
.../_trusted_access_roles_operations.py | 168 + .../v2024_07_01/py.typed | 1 + .../__init__.py | 20 + .../_configuration.py | 67 + .../_serialization.py | 2012 ++++ .../_source_control_configuration_client.py | 515 ++ .../_version.py | 8 + .../aio/__init__.py | 10 + .../aio/_configuration.py | 67 + .../_source_control_configuration_client.py | 515 ++ .../models.py | 9 + .../py.typed | 1 + .../v2023_05_01/__init__.py | 26 + .../v2023_05_01/_configuration.py | 66 + .../v2023_05_01/_patch.py | 20 + .../_source_control_configuration_client.py | 128 + .../v2023_05_01/_vendor.py | 16 + .../v2023_05_01/_version.py | 9 + .../v2023_05_01/aio/__init__.py | 23 + .../v2023_05_01/aio/_configuration.py | 66 + .../v2023_05_01/aio/_patch.py | 20 + .../_source_control_configuration_client.py | 128 + .../v2023_05_01/aio/operations/__init__.py | 29 + .../aio/operations/_extensions_operations.py | 947 ++ ...flux_config_operation_status_operations.py | 138 + .../_flux_configurations_operations.py | 952 ++ .../_operation_status_operations.py | 243 + .../v2023_05_01/aio/operations/_operations.py | 136 + .../v2023_05_01/aio/operations/_patch.py | 20 + ...ource_control_configurations_operations.py | 575 ++ .../v2023_05_01/models/__init__.py | 137 + .../v2023_05_01/models/_models_py3.py | 2796 ++++++ .../v2023_05_01/models/_patch.py | 20 + ...urce_control_configuration_client_enums.py | 121 + .../v2023_05_01/operations/__init__.py | 29 + .../operations/_extensions_operations.py | 1152 +++ ...flux_config_operation_status_operations.py | 185 + .../_flux_configurations_operations.py | 1163 +++ .../_operation_status_operations.py | 329 + .../v2023_05_01/operations/_operations.py | 158 + .../v2023_05_01/operations/_patch.py | 20 + ...ource_control_configurations_operations.py | 747 ++ .../v2023_05_01/py.typed | 1 + 95 files changed, 47282 insertions(+), 131 deletions(-) create mode 100644 src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py delete mode 100644 
src/dataprotection/azext_dataprotection/manual/aks/akshelper.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_configuration.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_container_service_client.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_serialization.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_version.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_configuration.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_container_service_client.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/py.typed create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_configuration.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_container_service_client.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_patch.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_version.py create mode 100644 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_configuration.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_container_service_client.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_patch.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_agent_pools_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_machines_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_maintenance_configurations_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_managed_clusters_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_patch.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_endpoint_connections_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_link_resources_operations.py create mode 100644 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_resolve_private_link_service_id_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_snapshots_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_role_bindings_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_roles_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_container_service_client_enums.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_models_py3.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_patch.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_agent_pools_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_machines_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_maintenance_configurations_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_managed_clusters_operations.py create mode 100644 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_patch.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_endpoint_connections_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_link_resources_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_resolve_private_link_service_id_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_snapshots_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_role_bindings_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_roles_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/py.typed create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_configuration.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_serialization.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_source_control_configuration_client.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_version.py 
create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/_configuration.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/_source_control_configuration_client.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/models.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/py.typed create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_configuration.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_patch.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_source_control_configuration_client.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_vendor.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_version.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_configuration.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_patch.py create mode 100644 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_source_control_configuration_client.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_extensions_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_flux_config_operation_status_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_flux_configurations_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_operation_status_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_patch.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_source_control_configurations_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_models_py3.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_patch.py create mode 100644 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_source_control_configuration_client_enums.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/__init__.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_extensions_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_flux_config_operation_status_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_flux_configurations_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_operation_status_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_patch.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_source_control_configurations_operations.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/py.typed diff --git a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py index 586538a16d8..22265f27e37 100644 --- a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py +++ b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py @@ -5483,7 +5483,7 @@ def set_up_backup(self, mc: ManagedCluster) -> ManagedCluster: name=self.context.get_name(), ) - from 
import json

from azure.cli.core.commands.client_factory import get_mgmt_service_client

# FIX: was `from src.dataprotection.azext_dataprotection.manual.enums import ...`,
# which only resolves when run from the repository root. Inside the installed
# extension the package root is `azext_dataprotection`.
from azext_dataprotection.manual.enums import CONST_RECOMMENDED


def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None):
    """Enable Azure Backup for an AKS cluster end to end.

    Provisions everything backup needs — backup resource group, storage
    account + blob container, the dataprotection cluster extension, a backup
    vault with a default policy, a trusted-access role binding and all role
    assignments — then creates the backup instance.

    :param cmd: Azure CLI command object (supplies ``cli_ctx`` for clients).
    :param datasource_id: Full ARM resource ID of the AKS managed cluster.
    :param backup_strategy: Backup preset name, defaults to CONST_RECOMMENDED.
        NOTE(review): currently only logged — TODO wire it into the policy
        that gets created below.
    :param configuration_params: Optional configuration overrides.
        NOTE(review): currently only logged — TODO apply to extension/policy.
    :raises ValueError: if ``datasource_id`` is not a full ARM resource ID.
    """
    print("datasourceId: " + datasource_id)
    print("backupStrategy: " + backup_strategy)
    print("configurationParams: " + json.dumps(configuration_params))

    # Parse the ARM ID once; fail fast with a clear message instead of an
    # opaque IndexError when the caller passes a partial ID.
    id_parts = datasource_id.split('/')
    if len(id_parts) < 9:
        raise ValueError(f"'{datasource_id}' is not a full AKS managed cluster ARM resource ID.")
    cluster_subscription_id = id_parts[2]
    cluster_resource_group_name = id_parts[4]
    cluster_name = id_parts[8]

    from azure.cli.command_modules.role.custom import create_role_assignment
    from azure.mgmt.resource import ResourceManagementClient

    resource_client = get_mgmt_service_client(cmd.cli_ctx, ResourceManagementClient, subscription_id=cluster_subscription_id)
    cluster_resource = resource_client.resources.get_by_id(datasource_id, api_version="2024-08-01")
    cluster_location = cluster_resource.location

    # Overall flow:
    #   - Create backup resource group
    #   - Create backup storage account and container
    #   - Install the backup extension on the cluster
    #   - Create backup vault and policy in the cluster resource group
    #   - Create trusted access role binding
    #   - Assign all permissions
    #   - Create backup instance

    backup_resource_group_name = __generate_backup_resource_group_name(cluster_location, cluster_name)
    print(f"Creating backup resource group ({backup_resource_group_name}) ...")
    backup_resource_group = resource_client.resource_groups.create_or_update(backup_resource_group_name, {"location": cluster_location})

    print(f"Assigning 'Contributor' role to the cluster identity on the backup resource group ({backup_resource_group_name}) ...")
    create_role_assignment(
        cmd,
        role="Contributor",
        assignee=cluster_resource.identity.principal_id,
        scope=backup_resource_group.id)

    from azure.mgmt.storage import StorageManagementClient
    storage_client = get_mgmt_service_client(cmd.cli_ctx, StorageManagementClient, subscription_id=cluster_subscription_id)
    backup_storage_account_name = __generate_backup_storage_account_name(cluster_location)
    print(f"Creating storage account ({backup_storage_account_name}) in the backup resource group ({backup_resource_group_name}) ...")
    backup_storage_account = storage_client.storage_accounts.begin_create(
        resource_group_name=backup_resource_group_name,
        account_name=backup_storage_account_name,
        parameters={
            "location": cluster_location,
            "kind": "StorageV2",
            "sku": {"name": "Standard_LRS"},
            # Backup data must never be publicly readable.
            "allow_blob_public_access": False
        }).result()

    backup_storage_account_container_name = __generate_backup_storage_account_container_name(cluster_name)
    print(f"Creating blob container ({backup_storage_account_container_name}) in the backup storage account ({backup_storage_account_name}) ...")
    storage_client.blob_containers.create(backup_resource_group_name, backup_storage_account_name, backup_storage_account_container_name, {})

    # Installs (or reuses) the dataprotection extension; its managed identity
    # is what writes backup data into the storage account.
    backup_extension = __create_backup_extension(
        cmd,
        cluster_subscription_id,
        cluster_resource_group_name,
        cluster_name,
        backup_storage_account_name,
        backup_storage_account_container_name,
        backup_resource_group_name,
        cluster_subscription_id)

    print(f"Assigning 'Storage Blob Data Contributor' role to the extension identity on the backup storage account ({backup_storage_account_name}) ...")
    create_role_assignment(
        cmd,
        role="Storage Blob Data Contributor",
        assignee=backup_extension.aks_assigned_identity.principal_id,
        scope=backup_storage_account.id)

    from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Create as _BackupVaultCreate
    backup_vault_name = __generate_backup_vault_name(cluster_location)
    print(f"Creating backup vault ({backup_vault_name}) in the cluster resource group ({cluster_resource_group_name}) ...")
    backup_vault = _BackupVaultCreate(cli_ctx=cmd.cli_ctx)(command_args={
        "vault_name": backup_vault_name,
        "resource_group": cluster_resource_group_name,
        "type": "SystemAssigned",
        "storage_setting": [{'type': 'LocallyRedundant', 'datastore-type': 'VaultStore'}]
    }).result()

    # The vault identity needs Reader on both the cluster and the snapshot
    # resource group to validate and run backups.
    print(f"Assigning 'Reader' role to the backup vault identity on the cluster ({cluster_name}) ...")
    create_role_assignment(
        cmd,
        role="Reader",
        assignee=backup_vault["identity"]["principalId"],
        scope=cluster_resource.id)

    print(f"Assigning 'Reader' role to the backup vault identity on the backup resource group ({backup_resource_group_name}) ...")
    create_role_assignment(
        cmd,
        role="Reader",
        assignee=backup_vault["identity"]["principalId"],
        scope=backup_resource_group.id)

    print(f"Setting up trusted access between the cluster ({cluster_name}) and the backup vault ({backup_vault_name}) ...")
    from azext_dataprotection.vendored_sdks.azure_mgmt_containerservice import ContainerServiceClient
    from azext_dataprotection.vendored_sdks.azure_mgmt_containerservice.v2024_07_01.models import TrustedAccessRoleBinding

    cluster_client = get_mgmt_service_client(cmd.cli_ctx, ContainerServiceClient, subscription_id=cluster_subscription_id)
    _trusted_access_role_binding = TrustedAccessRoleBinding(
        source_resource_id=backup_vault["id"],
        roles=["Microsoft.DataProtection/backupVaults/backup-operator"])

    cluster_client.trusted_access_role_bindings.begin_create_or_update(
        resource_group_name=cluster_resource_group_name,
        resource_name=cluster_name,
        trusted_access_role_binding_name=__generate_trusted_access_role_binding_name(backup_vault_name),
        trusted_access_role_binding=_trusted_access_role_binding).result()

    print(f"Creating backup policy in the backup vault ({backup_vault_name}) ...")
    from azext_dataprotection.manual.aaz_operations.backup_policy import Create as _BackupPolicyCreate
    backup_policy_name = __generate_backup_policy_name()
    # NOTE(review): unlike the other aaz calls this result is indexed directly
    # (no .result()) — confirm Create returns the policy dict synchronously.
    backup_policy = _BackupPolicyCreate(cli_ctx=cmd.cli_ctx)(command_args={
        "backup_policy_name": backup_policy_name,
        "resource_group": cluster_resource_group_name,
        "vault_name": backup_vault_name,
        "policy": {
            "objectType": "BackupPolicy",
            "datasourceTypes": [
                "Microsoft.ContainerService/managedClusters"
            ],
            "policyRules": [
                {
                    "isDefault": True,
                    "lifecycles": [
                        {
                            "deleteAfter": {
                                "duration": "P1D",
                                "objectType": "AbsoluteDeleteOption"
                            },
                            "sourceDataStore": {
                                "dataStoreType": "OperationalStore",
                                "objectType": "DataStoreInfoBase"
                            },
                            "targetDataStoreCopySettings": []
                        }
                    ],
                    "name": "Default",
                    "objectType": "AzureRetentionRule"
                },
                {
                    "backupParameters": {
                        "backupType": "Incremental",
                        "objectType": "AzureBackupParams"
                    },
                    "dataStore": {
                        "dataStoreType": "OperationalStore",
                        "objectType": "DataStoreInfoBase"
                    },
                    "name": "BackupHourly",
                    "objectType": "AzureBackupRule",
                    "trigger": {
                        "objectType": "ScheduleBasedTriggerContext",
                        "schedule": {
                            # Every 6 hours, anchored at a fixed past instant.
                            "repeatingTimeIntervals": [
                                "R/2024-01-01T00:00:00+00:00/PT6H"
                            ],
                            "timeZone": "Coordinated Universal Time"
                        },
                        "taggingCriteria": [
                            {
                                "isDefault": True,
                                "tagInfo": {
                                    "id": "Default_",
                                    "tagName": "Default"
                                },
                                "taggingPriority": 99
                            }
                        ]
                    }
                }
            ]
        }
    })

    print(f"Running final validation and configuring backup for the cluster ({cluster_name}) ...")
    from azext_dataprotection.manual.aaz_operations.backup_instance import ValidateAndCreate as _BackupInstanceValidateAndCreate

    import uuid
    # Suffix with a UUID so repeated runs don't collide on the instance name.
    backup_instance_name = f"{cluster_name}-{uuid.uuid4()}"
    backup_instance = _BackupInstanceValidateAndCreate(cli_ctx=cmd.cli_ctx)(command_args={
        "backup_instance_name": backup_instance_name,
        "resource_group": cluster_resource_group_name,
        "vault_name": backup_vault_name,
        "backup_instance": {
            "backup_instance_name": backup_instance_name,
            "properties": {
                "friendly_name": f"{cluster_name}\\fullbackup",
                "object_type": "BackupInstance",
                "data_source_info": {
                    "datasource_type": "Microsoft.ContainerService/managedClusters",
                    "object_type": "Datasource",
                    "resource_id": datasource_id,
                    "resource_location": cluster_location,
                    "resource_name": cluster_name,
                    "resource_type": "Microsoft.ContainerService/managedclusters",
                    "resource_uri": datasource_id
                },
                "data_source_set_info": {
                    "datasource_type": "Microsoft.ContainerService/managedClusters",
                    "object_type": "DatasourceSet",
                    "resource_id": datasource_id,
                    "resource_location": cluster_location,
                    "resource_name": cluster_name,
                    "resource_type": "Microsoft.ContainerService/managedclusters",
                    "resource_uri": datasource_id
                },
                "policy_info": {
                    "policy_id": backup_policy["id"],
                    "policy_parameters": {
                        "backup_datasource_parameters_list": [
                            {
                                "objectType": "KubernetesClusterBackupDatasourceParameters",
                                "include_cluster_scope_resources": True,
                                "snapshot_volumes": True
                            }
                        ],
                        "data_store_parameters_list": [
                            {
                                "object_type": "AzureOperationalStoreParameters",
                                "data_store_type": "OperationalStore",
                                "resource_group_id": backup_resource_group.id
                            }
                        ]
                    }
                }
            }
        }
    }).result()

    print(f"Kubernetes cluster ({cluster_name}) protected successfully.")
def __generate_arm_id(subscription_id, resource_group_name, resource_type, resource_name):
    """Build a full ARM resource ID from its four components."""
    return f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/{resource_type}/{resource_name}"


def __generate_backup_resource_group_name(cluster_location, cluster_name):
    """Name of the resource group that holds backup artifacts (snapshots, SA)."""
    return f"rg_azurebackup_{cluster_location}_{cluster_name}"


def __generate_backup_storage_account_name(cluster_location):
    """Name of the backup storage account.

    NOTE(review): one account per location, shared across clusters, and no
    uniqueness suffix — may collide across subscriptions and can exceed the
    24-character storage account name limit for long location names. TODO
    confirm/shorten (see hashing idea in the notes at the bottom of this file).
    """
    return f"kubernetesbackup{cluster_location}"


def __generate_backup_storage_account_container_name(cluster_name):
    """Blob container (per cluster) inside the backup storage account."""
    return f"backup-{cluster_name}"


def __generate_backup_vault_name(cluster_location):
    """Name of the backup vault (one per location in the cluster RG)."""
    return f"backupvault-{cluster_location}"


def __generate_backup_policy_name():
    # FIX: was an f-string with no placeholders (pylint W1309).
    return "defaultbackuppolicy"


def __generate_trusted_access_role_binding_name(backup_vault_name):
    # FIX: dropped the pointless f-prefix (pylint W1309).
    # TODO(review): placeholder value — the parameter is ignored, so two vaults
    # bound to the same cluster would collide. Derive the name from
    # backup_vault_name once the naming scheme is settled.
    return "backup-howtogetid"


def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster_name, storage_account_name, storage_account_container_name, storage_account_resource_group, storage_account_subscription_id):
    """Install (or reuse) the dataprotection extension on the AKS cluster.

    Returns the existing extension if one of type
    ``microsoft.dataprotection.kubernetes`` is already installed; otherwise
    installs ``azure-aks-backup`` wired to the given storage container and
    returns the created extension.
    """
    from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration import SourceControlConfigurationClient
    k8s_configuration_client = get_mgmt_service_client(cmd.cli_ctx, SourceControlConfigurationClient, subscription_id=subscription_id)

    extensions = k8s_configuration_client.extensions.list(
        cluster_rp="Microsoft.ContainerService",
        cluster_resource_name="managedClusters",
        resource_group_name=resource_group_name,
        cluster_name=cluster_name)

    # Idempotency: reuse an already-installed data protection extension.
    for page in extensions.by_page():
        for extension in page:
            if extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes':
                print(f"Data protection extension ({extension.name}) is already installed in the cluster ({cluster_name}).")
                return extension

    print(f"Installing data protection extension (azure-aks-backup) in the cluster ({cluster_name}) ...")

    # Reuse the k8s-extension CLI extension's implementation rather than
    # re-vendoring it; import lazily so the dependency is only required here.
    from azure.cli.core.extension.operations import add_extension_to_path
    from importlib import import_module
    add_extension_to_path("k8s-extension")
    k8s_extension_client_factory = import_module("azext_k8s_extension._client_factory")
    k8s_extension_module = import_module("azext_k8s_extension.custom")

    return k8s_extension_module.create_k8s_extension(
        cmd=cmd,
        client=k8s_extension_client_factory.cf_k8s_extension_operation(cmd.cli_ctx),
        resource_group_name=resource_group_name,
        cluster_name=cluster_name,
        name="azure-aks-backup",
        cluster_type="managedClusters",
        extension_type="microsoft.dataprotection.kubernetes",
        cluster_resource_provider="Microsoft.ContainerService",
        scope="cluster",
        auto_upgrade_minor_version=True,
        release_train="stable",
        configuration_settings=[{
            "blobContainer": storage_account_container_name,
            "storageAccount": storage_account_name,
            "storageAccountResourceGroup": storage_account_resource_group,
            "storageAccountSubscriptionId": storage_account_subscription_id
        }]
    ).result()

    #
    # Check if there is an SA in cluster RG with azure tag - clusterName = backup
    # If not, create one with name bkp- (4) (4) sha256 of cluster URI
    #
    #
    #
    # P2 - Using Extension routing, if there is a BI already for the cluster. If there, is print the vault name where it resides.
(this can be the very first step) + # + # + # Check if there is a backupvault in the subscription with tag, default=true + # + # If there is no such backup vault, create a resource group with tag backup-resource-group=true + # Create a backup vault in the resource group with tag default=true + # + # Check if the Vault has a policy with params matching Recommended Policy params + # + # + # + # diff --git a/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py b/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py deleted file mode 100644 index 2e0dd42c598..00000000000 --- a/src/dataprotection/azext_dataprotection/manual/aks/akshelper.py +++ /dev/null @@ -1,129 +0,0 @@ -import json -from src.dataprotection.azext_dataprotection.manual.enums import CONST_RECOMMENDED -from azure.cli.core.commands.client_factory import get_mgmt_service_client - -def dataprotection_enable_backup_helper(cmd, datasource_uri: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): - # Do GET on exten - print("Do GET on extension") - print("datasourceUri: " + datasource_uri) - print("backupStrategy: " + backup_strategy) - print ("configurationParams: " + json.dumps(configuration_params)) - - # extract subscriptoin ID from datasource_uri - cluster_subscription_id = datasource_uri.split('/')[2] - cluster_resource_group_name = datasource_uri.split('/')[4] - cluster_name = datasource_uri.split('/')[8] - - storage_account_subscription_id = "f0c630e0-2995-4853-b056-0b3c09cb673f" - storage_account_resource_group_name = "rg2eacanrraj" - storage_account_name = "tinysarraj" - storage_account_container_name = "container" - - backup_vault_subscription_id = "f0c630e0-2995-4853-b056-0b3c09cb673f" - backup_vault_resource_group = "rgwerraj" - backup_vault_name = "vaultwerraj" - - - """ - - Create backup vault and policy in the cluster resource group* - - Create backup resource group - - Create backup storage account and container - - Create backup extension - - Create trusted 
access role binding - - Assign all permissions - - Create backup instance - """ - - storage_account_arm_id = __generate_arm_id(storage_account_subscription_id, storage_account_resource_group_name, "Microsoft.Storage/storageAccounts", storage_account_name) - - # backup_extension = __create_backup_extension(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name) - from azure.cli.command_modules.role.custom import list_role_assignments, create_role_assignment - - create_role_assignment( - cmd, - assignee="e433a43c-9667-4e6a-9f73-8213565eb49e", - # assignee=backup_extension.aks_assigned_identity.principal_id, - role="Storage Blob Data Contributor", - scope=storage_account_arm_id) - - - -# Example usage -# __create_backup_storage_account_and_container(cli_ctx, "your_subscription_id", "your_resource_group_name", "your_storage_account_name", "your_container_name", "eastus") - - -# Example usage -# __create_resource_group(cli_ctx, "your_subscription_id", "your_resource_group_name", "eastus") - -def __generate_arm_id(subscription_id, resource_group_name, resource_type, resource_name): - return f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/{resource_type}/{resource_name}" - -def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster_name): - from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration import SourceControlConfigurationClient - from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration.v2023_05_01.models import Extension - - k8s_configuration_client = get_mgmt_service_client(cmd.cli_ctx, SourceControlConfigurationClient, subscription_id=subscription_id) - - extensions = k8s_configuration_client.extensions.list( - cluster_rp="Microsoft.ContainerService", - cluster_resource_name="managedClusters", - resource_group_name=resource_group_name, - cluster_name=cluster_name) - - for page in extensions.by_page(): - for extension in page: - if 
extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': - print("Extension found: " + extension.name) - break - - print("Creating backup extension...") - - from azure.cli.core.extension.operations import add_extension_to_path - from importlib import import_module - add_extension_to_path("k8s-extension") - K8s_extension_client_factory = import_module("azext_k8s_extension._client_factory") - k8s_extension_module = import_module("azext_k8s_extension.custom") - - # return k8s_extension_module.create_k8s_extension( - # cmd=cmd, - # client=K8s_extension_client_factory.cf_k8s_extension_operation(cmd.cli_ctx), - # resource_group_name=resource_group_name, - # cluster_name=cluster_name, - # name="azure-aks-backup", - # cluster_type="managedClusters", - # extension_type="microsoft.dataprotection.kubernetes", - # cluster_resource_provider="Microsoft.ContainerService", - # scope="cluster", - # auto_upgrade_minor_version=True, - # release_train="stable", - # configuration_settings=[{ - # "blobContainer": "container", - # "storageAccount": "tinysarraj", - # "storageAccountResourceGroup": "rg2eacanrraj", - # "storageAccountSubscriptionId": "f0c630e0-2995-4853-b056-0b3c09cb673f" - # }] - # ).result() - - - # print(response) - # print(response.result()) - - # - # Check if there is an SA in cluster RG with azure tag - clusterName = backup - # If not, create one with name bkp- (4) (4) sha256 of cluster URI - # - # - # - # P2 - Using Extension routing, if there is a BI already for the cluster. If there, is print the vault name where it resides. 
(this can be the very first step) - # - # - # Check if there is a backupvault in the subscription with tag, default=true - # - # If there is no such backup vault, create a resource group with tag backup-resource-group=true - # Create a backup vault in the resource group with tag default=true - # - # Check if the Vault has a policy with params matching Recommended Policy params - # - # - # - # diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index 1d6ced09bd6..bbc0b0e268f 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -1022,7 +1022,7 @@ def dataprotection_enable_backup(cmd, datasource_uri: str, backup_strategy=CONST # if uri contains case insensitive Microsoft.ContainerService/managedClusters contains and add if check if "Microsoft.ContainerService/managedClusters".lower() in datasource_uri.lower(): - from azext_dataprotection.manual.aks.akshelper import dataprotection_enable_backup_helper + from azext_dataprotection.manual.aks.aks_helper import dataprotection_enable_backup_helper dataprotection_enable_backup_helper(cmd, datasource_uri, backup_strategy, configuration_params) return else: diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/__init__.py new file mode 100644 index 00000000000..c9e1e010e0e --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/__init__.py @@ -0,0 +1,20 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._container_service_client import ContainerServiceClient +__all__ = ['ContainerServiceClient'] + +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass + +from ._version import VERSION + +__version__ = VERSION diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_configuration.py new file mode 100644 index 00000000000..d0d261e8db8 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_configuration.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + +class ContainerServiceClientConfiguration: + """Configuration for ContainerServiceClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. Required. 
+ :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + """ + + def __init__( + self, + credential: "TokenCredential", + subscription_id: str, + **kwargs: Any + ): + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + + self.credential = credential + self.subscription_id = subscription_id + self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) + kwargs.setdefault('sdk_moniker', 'azure-mgmt-containerservice/{}'.format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ): + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_container_service_client.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_container_service_client.py new file mode 100644 index 00000000000..d3dcf9c29cd --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_container_service_client.py @@ -0,0 +1,2655 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, Optional, TYPE_CHECKING +from typing_extensions import Self + +from azure.core.pipeline import policies +from azure.mgmt.core import ARMPipelineClient +from azure.mgmt.core.policies import ARMAutoResourceProviderRegistrationPolicy +from azure.profiles import KnownProfiles, ProfileDefinition +from azure.profiles.multiapiclient import MultiApiClientMixin + +from ._configuration import ContainerServiceClientConfiguration +from ._serialization import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + +class _SDKClient(object): + def __init__(self, *args, **kwargs): + """This is a fake class to support current implemetation of MultiApiClientMixin." + Will be removed in final version of multiapi azure-core based client + """ + pass + +class ContainerServiceClient(MultiApiClientMixin, _SDKClient): + """The Container Service Client. + + This ready contains multiple API versions, to help you deal with all of the Azure clouds + (Azure Stack, Azure Government, Azure China, etc.). + By default, it uses the latest API version available on public Azure. 
+ For production, you should stick to a particular api-version and/or profile. + The profile sets a mapping between an operation group and its API version. + The api-version parameter sets the default API version if the operation + group is not described in the profile. + + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + :param api_version: API version to use if no profile is provided, or if missing in profile. + :type api_version: str + :param base_url: Service URL + :type base_url: str + :param profile: A profile definition, from KnownProfiles to dict. + :type profile: azure.profiles.KnownProfiles + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + """ + + DEFAULT_API_VERSION = '2024-07-01' + _PROFILE_TAG = "azure.mgmt.containerservice.ContainerServiceClient" + LATEST_PROFILE = ProfileDefinition({ + _PROFILE_TAG: { + None: DEFAULT_API_VERSION, + 'container_services': '2019-04-01', + 'fleet_members': '2022-09-02-preview', + 'fleets': '2022-09-02-preview', + 'load_balancers': '2024-06-02-preview', + 'managed_cluster_snapshots': '2024-06-02-preview', + 'open_shift_managed_clusters': '2019-04-30', + 'operation_status_result': '2024-06-02-preview', + }}, + _PROFILE_TAG + " latest" + ) + + def __init__( + self, + credential: "TokenCredential", + subscription_id: str, + api_version: Optional[str]=None, + base_url: str = "https://management.azure.com", + profile: KnownProfiles=KnownProfiles.default, + **kwargs: Any + ): + if api_version: + kwargs.setdefault('api_version', api_version) + self._config = ContainerServiceClientConfiguration(credential, subscription_id, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + 
policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + ARMAutoResourceProviderRegistrationPolicy(), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client = ARMPipelineClient(base_url=base_url, policies=_policies, **kwargs) + super(ContainerServiceClient, self).__init__( + api_version=api_version, + profile=profile + ) + + @classmethod + def _models_dict(cls, api_version): + return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} + + @classmethod + def models(cls, api_version=DEFAULT_API_VERSION): + """Module depends on the API version: + + * 2017-07-01: :mod:`v2017_07_01.models` + * 2018-03-31: :mod:`v2018_03_31.models` + * 2018-08-01-preview: :mod:`v2018_08_01_preview.models` + * 2018-09-30-preview: :mod:`v2018_09_30_preview.models` + * 2019-02-01: :mod:`v2019_02_01.models` + * 2019-04-01: :mod:`v2019_04_01.models` + * 2019-04-30: :mod:`v2019_04_30.models` + * 2019-06-01: :mod:`v2019_06_01.models` + * 2019-08-01: :mod:`v2019_08_01.models` + * 2019-09-30-preview: :mod:`v2019_09_30_preview.models` + * 2019-10-01: :mod:`v2019_10_01.models` + * 2019-10-27-preview: :mod:`v2019_10_27_preview.models` + * 2019-11-01: :mod:`v2019_11_01.models` + * 2020-01-01: :mod:`v2020_01_01.models` + * 2020-02-01: :mod:`v2020_02_01.models` + * 2020-03-01: :mod:`v2020_03_01.models` + * 2020-04-01: :mod:`v2020_04_01.models` + * 2020-06-01: :mod:`v2020_06_01.models` + * 2020-07-01: :mod:`v2020_07_01.models` + * 2020-09-01: :mod:`v2020_09_01.models` + * 2020-11-01: :mod:`v2020_11_01.models` + * 2020-12-01: :mod:`v2020_12_01.models` + * 
2021-02-01: :mod:`v2021_02_01.models` + * 2021-03-01: :mod:`v2021_03_01.models` + * 2021-05-01: :mod:`v2021_05_01.models` + * 2021-07-01: :mod:`v2021_07_01.models` + * 2021-08-01: :mod:`v2021_08_01.models` + * 2021-09-01: :mod:`v2021_09_01.models` + * 2021-10-01: :mod:`v2021_10_01.models` + * 2021-11-01-preview: :mod:`v2021_11_01_preview.models` + * 2022-01-01: :mod:`v2022_01_01.models` + * 2022-01-02-preview: :mod:`v2022_01_02_preview.models` + * 2022-02-01: :mod:`v2022_02_01.models` + * 2022-02-02-preview: :mod:`v2022_02_02_preview.models` + * 2022-03-01: :mod:`v2022_03_01.models` + * 2022-03-02-preview: :mod:`v2022_03_02_preview.models` + * 2022-04-01: :mod:`v2022_04_01.models` + * 2022-04-02-preview: :mod:`v2022_04_02_preview.models` + * 2022-05-02-preview: :mod:`v2022_05_02_preview.models` + * 2022-06-01: :mod:`v2022_06_01.models` + * 2022-06-02-preview: :mod:`v2022_06_02_preview.models` + * 2022-07-01: :mod:`v2022_07_01.models` + * 2022-07-02-preview: :mod:`v2022_07_02_preview.models` + * 2022-08-02-preview: :mod:`v2022_08_02_preview.models` + * 2022-08-03-preview: :mod:`v2022_08_03_preview.models` + * 2022-09-01: :mod:`v2022_09_01.models` + * 2022-09-02-preview: :mod:`v2022_09_02_preview.models` + * 2022-10-02-preview: :mod:`v2022_10_02_preview.models` + * 2022-11-01: :mod:`v2022_11_01.models` + * 2022-11-02-preview: :mod:`v2022_11_02_preview.models` + * 2023-01-01: :mod:`v2023_01_01.models` + * 2023-01-02-preview: :mod:`v2023_01_02_preview.models` + * 2023-02-01: :mod:`v2023_02_01.models` + * 2023-02-02-preview: :mod:`v2023_02_02_preview.models` + * 2023-03-01: :mod:`v2023_03_01.models` + * 2023-03-02-preview: :mod:`v2023_03_02_preview.models` + * 2023-04-01: :mod:`v2023_04_01.models` + * 2023-04-02-preview: :mod:`v2023_04_02_preview.models` + * 2023-05-01: :mod:`v2023_05_01.models` + * 2023-05-02-preview: :mod:`v2023_05_02_preview.models` + * 2023-06-01: :mod:`v2023_06_01.models` + * 2023-06-02-preview: :mod:`v2023_06_02_preview.models` + * 2023-07-01: 
:mod:`v2023_07_01.models` + * 2023-07-02-preview: :mod:`v2023_07_02_preview.models` + * 2023-08-01: :mod:`v2023_08_01.models` + * 2023-08-02-preview: :mod:`v2023_08_02_preview.models` + * 2023-09-01: :mod:`v2023_09_01.models` + * 2023-09-02-preview: :mod:`v2023_09_02_preview.models` + * 2023-10-01: :mod:`v2023_10_01.models` + * 2023-10-02-preview: :mod:`v2023_10_02_preview.models` + * 2023-11-01: :mod:`v2023_11_01.models` + * 2023-11-02-preview: :mod:`v2023_11_02_preview.models` + * 2024-01-01: :mod:`v2024_01_01.models` + * 2024-01-02-preview: :mod:`v2024_01_02_preview.models` + * 2024-02-01: :mod:`v2024_02_01.models` + * 2024-02-02-preview: :mod:`v2024_02_02_preview.models` + * 2024-03-02-preview: :mod:`v2024_03_02_preview.models` + * 2024-04-02-preview: :mod:`v2024_04_02_preview.models` + * 2024-05-01: :mod:`v2024_05_01.models` + * 2024-05-02-preview: :mod:`v2024_05_02_preview.models` + * 2024-06-02-preview: :mod:`v2024_06_02_preview.models` + * 2024-07-01: :mod:`v2024_07_01.models` + """ + if api_version == '2017-07-01': + from .v2017_07_01 import models + return models + elif api_version == '2018-03-31': + from .v2018_03_31 import models + return models + elif api_version == '2018-08-01-preview': + from .v2018_08_01_preview import models + return models + elif api_version == '2018-09-30-preview': + from .v2018_09_30_preview import models + return models + elif api_version == '2019-02-01': + from .v2019_02_01 import models + return models + elif api_version == '2019-04-01': + from .v2019_04_01 import models + return models + elif api_version == '2019-04-30': + from .v2019_04_30 import models + return models + elif api_version == '2019-06-01': + from .v2019_06_01 import models + return models + elif api_version == '2019-08-01': + from .v2019_08_01 import models + return models + elif api_version == '2019-09-30-preview': + from .v2019_09_30_preview import models + return models + elif api_version == '2019-10-01': + from .v2019_10_01 import models + return models + 
elif api_version == '2019-10-27-preview': + from .v2019_10_27_preview import models + return models + elif api_version == '2019-11-01': + from .v2019_11_01 import models + return models + elif api_version == '2020-01-01': + from .v2020_01_01 import models + return models + elif api_version == '2020-02-01': + from .v2020_02_01 import models + return models + elif api_version == '2020-03-01': + from .v2020_03_01 import models + return models + elif api_version == '2020-04-01': + from .v2020_04_01 import models + return models + elif api_version == '2020-06-01': + from .v2020_06_01 import models + return models + elif api_version == '2020-07-01': + from .v2020_07_01 import models + return models + elif api_version == '2020-09-01': + from .v2020_09_01 import models + return models + elif api_version == '2020-11-01': + from .v2020_11_01 import models + return models + elif api_version == '2020-12-01': + from .v2020_12_01 import models + return models + elif api_version == '2021-02-01': + from .v2021_02_01 import models + return models + elif api_version == '2021-03-01': + from .v2021_03_01 import models + return models + elif api_version == '2021-05-01': + from .v2021_05_01 import models + return models + elif api_version == '2021-07-01': + from .v2021_07_01 import models + return models + elif api_version == '2021-08-01': + from .v2021_08_01 import models + return models + elif api_version == '2021-09-01': + from .v2021_09_01 import models + return models + elif api_version == '2021-10-01': + from .v2021_10_01 import models + return models + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview import models + return models + elif api_version == '2022-01-01': + from .v2022_01_01 import models + return models + elif api_version == '2022-01-02-preview': + from .v2022_01_02_preview import models + return models + elif api_version == '2022-02-01': + from .v2022_02_01 import models + return models + elif api_version == '2022-02-02-preview': + from 
.v2022_02_02_preview import models + return models + elif api_version == '2022-03-01': + from .v2022_03_01 import models + return models + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview import models + return models + elif api_version == '2022-04-01': + from .v2022_04_01 import models + return models + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview import models + return models + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview import models + return models + elif api_version == '2022-06-01': + from .v2022_06_01 import models + return models + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview import models + return models + elif api_version == '2022-07-01': + from .v2022_07_01 import models + return models + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview import models + return models + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview import models + return models + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview import models + return models + elif api_version == '2022-09-01': + from .v2022_09_01 import models + return models + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview import models + return models + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview import models + return models + elif api_version == '2022-11-01': + from .v2022_11_01 import models + return models + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview import models + return models + elif api_version == '2023-01-01': + from .v2023_01_01 import models + return models + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview import models + return models + elif api_version == '2023-02-01': + from .v2023_02_01 import models + return models + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview import models + return models + elif api_version == '2023-03-01': 
+ from .v2023_03_01 import models + return models + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview import models + return models + elif api_version == '2023-04-01': + from .v2023_04_01 import models + return models + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview import models + return models + elif api_version == '2023-05-01': + from .v2023_05_01 import models + return models + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview import models + return models + elif api_version == '2023-06-01': + from .v2023_06_01 import models + return models + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview import models + return models + elif api_version == '2023-07-01': + from .v2023_07_01 import models + return models + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview import models + return models + elif api_version == '2023-08-01': + from .v2023_08_01 import models + return models + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview import models + return models + elif api_version == '2023-09-01': + from .v2023_09_01 import models + return models + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview import models + return models + elif api_version == '2023-10-01': + from .v2023_10_01 import models + return models + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview import models + return models + elif api_version == '2023-11-01': + from .v2023_11_01 import models + return models + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview import models + return models + elif api_version == '2024-01-01': + from .v2024_01_01 import models + return models + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview import models + return models + elif api_version == '2024-02-01': + from .v2024_02_01 import models + return models + elif api_version == '2024-02-02-preview': + from 
.v2024_02_02_preview import models + return models + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview import models + return models + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview import models + return models + elif api_version == '2024-05-01': + from .v2024_05_01 import models + return models + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview import models + return models + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview import models + return models + elif api_version == '2024-07-01': + from .v2024_07_01 import models + return models + raise ValueError("API version {} is not available".format(api_version)) + + @property + def agent_pools(self): + """Instance depends on the API version: + + * 2019-02-01: :class:`AgentPoolsOperations` + * 2019-04-01: :class:`AgentPoolsOperations` + * 2019-06-01: :class:`AgentPoolsOperations` + * 2019-08-01: :class:`AgentPoolsOperations` + * 2019-10-01: :class:`AgentPoolsOperations` + * 2019-11-01: :class:`AgentPoolsOperations` + * 2020-01-01: :class:`AgentPoolsOperations` + * 2020-02-01: :class:`AgentPoolsOperations` + * 2020-03-01: :class:`AgentPoolsOperations` + * 2020-04-01: :class:`AgentPoolsOperations` + * 2020-06-01: :class:`AgentPoolsOperations` + * 2020-07-01: :class:`AgentPoolsOperations` + * 2020-09-01: :class:`AgentPoolsOperations` + * 2020-11-01: :class:`AgentPoolsOperations` + * 2020-12-01: :class:`AgentPoolsOperations` + * 2021-02-01: :class:`AgentPoolsOperations` + * 2021-03-01: :class:`AgentPoolsOperations` + * 2021-05-01: :class:`AgentPoolsOperations` + * 2021-07-01: :class:`AgentPoolsOperations` + * 2021-08-01: :class:`AgentPoolsOperations` + * 2021-09-01: :class:`AgentPoolsOperations` + * 2021-10-01: :class:`AgentPoolsOperations` + * 2021-11-01-preview: :class:`AgentPoolsOperations` + * 2022-01-01: :class:`AgentPoolsOperations` + * 2022-01-02-preview: :class:`AgentPoolsOperations` + * 2022-02-01: :class:`AgentPoolsOperations` 
+ * 2022-02-02-preview: :class:`AgentPoolsOperations` + * 2022-03-01: :class:`AgentPoolsOperations` + * 2022-03-02-preview: :class:`AgentPoolsOperations` + * 2022-04-01: :class:`AgentPoolsOperations` + * 2022-04-02-preview: :class:`AgentPoolsOperations` + * 2022-05-02-preview: :class:`AgentPoolsOperations` + * 2022-06-01: :class:`AgentPoolsOperations` + * 2022-06-02-preview: :class:`AgentPoolsOperations` + * 2022-07-01: :class:`AgentPoolsOperations` + * 2022-07-02-preview: :class:`AgentPoolsOperations` + * 2022-08-02-preview: :class:`AgentPoolsOperations` + * 2022-08-03-preview: :class:`AgentPoolsOperations` + * 2022-09-01: :class:`AgentPoolsOperations` + * 2022-09-02-preview: :class:`AgentPoolsOperations` + * 2022-10-02-preview: :class:`AgentPoolsOperations` + * 2022-11-01: :class:`AgentPoolsOperations` + * 2022-11-02-preview: :class:`AgentPoolsOperations` + * 2023-01-01: :class:`AgentPoolsOperations` + * 2023-01-02-preview: :class:`AgentPoolsOperations` + * 2023-02-01: :class:`AgentPoolsOperations` + * 2023-02-02-preview: :class:`AgentPoolsOperations` + * 2023-03-01: :class:`AgentPoolsOperations` + * 2023-03-02-preview: :class:`AgentPoolsOperations` + * 2023-04-01: :class:`AgentPoolsOperations` + * 2023-04-02-preview: :class:`AgentPoolsOperations` + * 2023-05-01: :class:`AgentPoolsOperations` + * 2023-05-02-preview: :class:`AgentPoolsOperations` + * 2023-06-01: :class:`AgentPoolsOperations` + * 2023-06-02-preview: :class:`AgentPoolsOperations` + * 2023-07-01: :class:`AgentPoolsOperations` + * 2023-07-02-preview: :class:`AgentPoolsOperations` + * 2023-08-01: :class:`AgentPoolsOperations` + * 2023-08-02-preview: :class:`AgentPoolsOperations` + * 2023-09-01: :class:`AgentPoolsOperations` + * 2023-09-02-preview: :class:`AgentPoolsOperations` + * 2023-10-01: :class:`AgentPoolsOperations` + * 2023-10-02-preview: :class:`AgentPoolsOperations` + * 2023-11-01: :class:`AgentPoolsOperations` + * 2023-11-02-preview: :class:`AgentPoolsOperations` + * 2024-01-01: 
:class:`AgentPoolsOperations` + * 2024-01-02-preview: :class:`AgentPoolsOperations` + * 2024-02-01: :class:`AgentPoolsOperations` + * 2024-02-02-preview: :class:`AgentPoolsOperations` + * 2024-03-02-preview: :class:`AgentPoolsOperations` + * 2024-04-02-preview: :class:`AgentPoolsOperations` + * 2024-05-01: :class:`AgentPoolsOperations` + * 2024-05-02-preview: :class:`AgentPoolsOperations` + * 2024-06-02-preview: :class:`AgentPoolsOperations` + * 2024-07-01: :class:`AgentPoolsOperations` + """ + api_version = self._get_api_version('agent_pools') + if api_version == '2019-02-01': + from .v2019_02_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-04-01': + from .v2019_04_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-06-01': + from .v2019_06_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-08-01': + from .v2019_08_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-10-01': + from .v2019_10_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-11-01': + from .v2019_11_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-01-01': + from .v2020_01_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-02-01': + from .v2020_02_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-03-01': + from .v2020_03_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-04-01': + from .v2020_04_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-06-01': + from .v2020_06_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-07-01': + from .v2020_07_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-09-01': + from .v2020_09_01.operations import AgentPoolsOperations 
as OperationClass + elif api_version == '2020-11-01': + from .v2020_11_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-12-01': + from .v2020_12_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-02-01': + from .v2021_02_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-03-01': + from .v2021_03_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-05-01': + from .v2021_05_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-07-01': + from .v2021_07_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-08-01': + from .v2021_08_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-09-01': + from .v2021_09_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-10-01': + from .v2021_10_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-01-01': + from .v2022_01_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-01-02-preview': + from .v2022_01_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-02-01': + from .v2022_02_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-02-02-preview': + from .v2022_02_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-04-01': + from .v2022_04_01.operations import AgentPoolsOperations as 
OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-06-01': + from .v2022_06_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-09-01': + from .v2022_09_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-01-01': + from .v2023_01_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-02-01': + from .v2023_02_01.operations import AgentPoolsOperations as OperationClass + elif 
api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-03-01': + from .v2023_03_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-04-01': + from .v2023_04_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-06-01': + from .v2023_06_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-07-01': + from .v2023_07_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-08-01': + from .v2023_08_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from 
.v2023_10_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import AgentPoolsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'agent_pools'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def container_services(self): + """Instance depends on the API version: + + * 2017-07-01: :class:`ContainerServicesOperations` + * 
2019-04-01: :class:`ContainerServicesOperations` + """ + api_version = self._get_api_version('container_services') + if api_version == '2017-07-01': + from .v2017_07_01.operations import ContainerServicesOperations as OperationClass + elif api_version == '2019-04-01': + from .v2019_04_01.operations import ContainerServicesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'container_services'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def fleet_members(self): + """Instance depends on the API version: + + * 2022-06-02-preview: :class:`FleetMembersOperations` + * 2022-07-02-preview: :class:`FleetMembersOperations` + * 2022-09-02-preview: :class:`FleetMembersOperations` + """ + api_version = self._get_api_version('fleet_members') + if api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import FleetMembersOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import FleetMembersOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import FleetMembersOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'fleet_members'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def fleets(self): + """Instance depends on the API version: + + * 2022-06-02-preview: :class:`FleetsOperations` + * 2022-07-02-preview: :class:`FleetsOperations` + * 2022-09-02-preview: :class:`FleetsOperations` + """ + api_version = self._get_api_version('fleets') + if api_version == 
'2022-06-02-preview': + from .v2022_06_02_preview.operations import FleetsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import FleetsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import FleetsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'fleets'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def load_balancers(self): + """Instance depends on the API version: + + * 2024-03-02-preview: :class:`LoadBalancersOperations` + * 2024-04-02-preview: :class:`LoadBalancersOperations` + * 2024-05-02-preview: :class:`LoadBalancersOperations` + * 2024-06-02-preview: :class:`LoadBalancersOperations` + """ + api_version = self._get_api_version('load_balancers') + if api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import LoadBalancersOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import LoadBalancersOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import LoadBalancersOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import LoadBalancersOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'load_balancers'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def machines(self): + """Instance depends on the API version: + + * 2023-07-02-preview: :class:`MachinesOperations` + * 
2023-08-02-preview: :class:`MachinesOperations` + * 2023-09-02-preview: :class:`MachinesOperations` + * 2023-10-02-preview: :class:`MachinesOperations` + * 2023-11-02-preview: :class:`MachinesOperations` + * 2024-01-02-preview: :class:`MachinesOperations` + * 2024-02-02-preview: :class:`MachinesOperations` + * 2024-03-02-preview: :class:`MachinesOperations` + * 2024-04-02-preview: :class:`MachinesOperations` + * 2024-05-02-preview: :class:`MachinesOperations` + * 2024-06-02-preview: :class:`MachinesOperations` + * 2024-07-01: :class:`MachinesOperations` + """ + api_version = self._get_api_version('machines') + if api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import MachinesOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import MachinesOperations as 
OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import MachinesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'machines'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def maintenance_configurations(self): + """Instance depends on the API version: + + * 2020-12-01: :class:`MaintenanceConfigurationsOperations` + * 2021-02-01: :class:`MaintenanceConfigurationsOperations` + * 2021-03-01: :class:`MaintenanceConfigurationsOperations` + * 2021-05-01: :class:`MaintenanceConfigurationsOperations` + * 2021-07-01: :class:`MaintenanceConfigurationsOperations` + * 2021-08-01: :class:`MaintenanceConfigurationsOperations` + * 2021-09-01: :class:`MaintenanceConfigurationsOperations` + * 2021-10-01: :class:`MaintenanceConfigurationsOperations` + * 2021-11-01-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-01-01: :class:`MaintenanceConfigurationsOperations` + * 2022-01-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-02-01: :class:`MaintenanceConfigurationsOperations` + * 2022-02-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-03-01: :class:`MaintenanceConfigurationsOperations` + * 2022-03-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-04-01: :class:`MaintenanceConfigurationsOperations` + * 2022-04-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-05-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-06-01: :class:`MaintenanceConfigurationsOperations` + * 2022-06-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-07-01: :class:`MaintenanceConfigurationsOperations` + * 2022-07-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-08-02-preview: 
:class:`MaintenanceConfigurationsOperations` + * 2022-08-03-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-09-01: :class:`MaintenanceConfigurationsOperations` + * 2022-09-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-10-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-11-01: :class:`MaintenanceConfigurationsOperations` + * 2022-11-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-01-01: :class:`MaintenanceConfigurationsOperations` + * 2023-01-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-02-01: :class:`MaintenanceConfigurationsOperations` + * 2023-02-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-03-01: :class:`MaintenanceConfigurationsOperations` + * 2023-03-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-04-01: :class:`MaintenanceConfigurationsOperations` + * 2023-04-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-05-01: :class:`MaintenanceConfigurationsOperations` + * 2023-05-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-06-01: :class:`MaintenanceConfigurationsOperations` + * 2023-06-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-07-01: :class:`MaintenanceConfigurationsOperations` + * 2023-07-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-08-01: :class:`MaintenanceConfigurationsOperations` + * 2023-08-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-09-01: :class:`MaintenanceConfigurationsOperations` + * 2023-09-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-10-01: :class:`MaintenanceConfigurationsOperations` + * 2023-10-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-11-01: :class:`MaintenanceConfigurationsOperations` + * 2023-11-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-01-01: :class:`MaintenanceConfigurationsOperations` + * 2024-01-02-preview: 
:class:`MaintenanceConfigurationsOperations` + * 2024-02-01: :class:`MaintenanceConfigurationsOperations` + * 2024-02-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-03-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-04-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-05-01: :class:`MaintenanceConfigurationsOperations` + * 2024-05-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-06-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-07-01: :class:`MaintenanceConfigurationsOperations` + """ + api_version = self._get_api_version('maintenance_configurations') + if api_version == '2020-12-01': + from .v2020_12_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-02-01': + from .v2021_02_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-03-01': + from .v2021_03_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-05-01': + from .v2021_05_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-07-01': + from .v2021_07_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-08-01': + from .v2021_08_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-09-01': + from .v2021_09_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-10-01': + from .v2021_10_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-01-01': + from .v2022_01_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-01-02-preview': + from 
.v2022_01_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-02-01': + from .v2022_02_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-02-02-preview': + from .v2022_02_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-04-01': + from .v2022_04_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-06-01': + from .v2022_06_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-09-01': + from .v2022_09_01.operations import MaintenanceConfigurationsOperations as 
OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-01-01': + from .v2023_01_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-02-01': + from .v2023_02_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-03-01': + from .v2023_03_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-04-01': + from .v2023_04_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-06-01': + from 
.v2023_06_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-07-01': + from .v2023_07_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-08-01': + from .v2023_08_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif 
api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import MaintenanceConfigurationsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'maintenance_configurations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def managed_cluster_snapshots(self): + """Instance depends on the API version: + + * 2022-02-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-03-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-04-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-05-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-06-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-07-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-08-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-08-03-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-09-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 
2022-10-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-11-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-01-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-02-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-03-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-04-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-05-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-06-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-07-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-08-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-09-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-10-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-11-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-01-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-02-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-03-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-04-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-05-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-06-02-preview: :class:`ManagedClusterSnapshotsOperations` + """ + api_version = self._get_api_version('managed_cluster_snapshots') + if api_version == '2022-02-02-preview': + from .v2022_02_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import 
ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import 
ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'managed_cluster_snapshots'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def managed_clusters(self): + """Instance depends on the API version: + + * 2018-03-31: :class:`ManagedClustersOperations` + * 2018-08-01-preview: :class:`ManagedClustersOperations` + * 2019-02-01: :class:`ManagedClustersOperations` + * 2019-04-01: :class:`ManagedClustersOperations` + * 2019-06-01: :class:`ManagedClustersOperations` 
+ * 2019-08-01: :class:`ManagedClustersOperations` + * 2019-10-01: :class:`ManagedClustersOperations` + * 2019-11-01: :class:`ManagedClustersOperations` + * 2020-01-01: :class:`ManagedClustersOperations` + * 2020-02-01: :class:`ManagedClustersOperations` + * 2020-03-01: :class:`ManagedClustersOperations` + * 2020-04-01: :class:`ManagedClustersOperations` + * 2020-06-01: :class:`ManagedClustersOperations` + * 2020-07-01: :class:`ManagedClustersOperations` + * 2020-09-01: :class:`ManagedClustersOperations` + * 2020-11-01: :class:`ManagedClustersOperations` + * 2020-12-01: :class:`ManagedClustersOperations` + * 2021-02-01: :class:`ManagedClustersOperations` + * 2021-03-01: :class:`ManagedClustersOperations` + * 2021-05-01: :class:`ManagedClustersOperations` + * 2021-07-01: :class:`ManagedClustersOperations` + * 2021-08-01: :class:`ManagedClustersOperations` + * 2021-09-01: :class:`ManagedClustersOperations` + * 2021-10-01: :class:`ManagedClustersOperations` + * 2021-11-01-preview: :class:`ManagedClustersOperations` + * 2022-01-01: :class:`ManagedClustersOperations` + * 2022-01-02-preview: :class:`ManagedClustersOperations` + * 2022-02-01: :class:`ManagedClustersOperations` + * 2022-02-02-preview: :class:`ManagedClustersOperations` + * 2022-03-01: :class:`ManagedClustersOperations` + * 2022-03-02-preview: :class:`ManagedClustersOperations` + * 2022-04-01: :class:`ManagedClustersOperations` + * 2022-04-02-preview: :class:`ManagedClustersOperations` + * 2022-05-02-preview: :class:`ManagedClustersOperations` + * 2022-06-01: :class:`ManagedClustersOperations` + * 2022-06-02-preview: :class:`ManagedClustersOperations` + * 2022-07-01: :class:`ManagedClustersOperations` + * 2022-07-02-preview: :class:`ManagedClustersOperations` + * 2022-08-02-preview: :class:`ManagedClustersOperations` + * 2022-08-03-preview: :class:`ManagedClustersOperations` + * 2022-09-01: :class:`ManagedClustersOperations` + * 2022-09-02-preview: :class:`ManagedClustersOperations` + * 2022-10-02-preview: 
:class:`ManagedClustersOperations` + * 2022-11-01: :class:`ManagedClustersOperations` + * 2022-11-02-preview: :class:`ManagedClustersOperations` + * 2023-01-01: :class:`ManagedClustersOperations` + * 2023-01-02-preview: :class:`ManagedClustersOperations` + * 2023-02-01: :class:`ManagedClustersOperations` + * 2023-02-02-preview: :class:`ManagedClustersOperations` + * 2023-03-01: :class:`ManagedClustersOperations` + * 2023-03-02-preview: :class:`ManagedClustersOperations` + * 2023-04-01: :class:`ManagedClustersOperations` + * 2023-04-02-preview: :class:`ManagedClustersOperations` + * 2023-05-01: :class:`ManagedClustersOperations` + * 2023-05-02-preview: :class:`ManagedClustersOperations` + * 2023-06-01: :class:`ManagedClustersOperations` + * 2023-06-02-preview: :class:`ManagedClustersOperations` + * 2023-07-01: :class:`ManagedClustersOperations` + * 2023-07-02-preview: :class:`ManagedClustersOperations` + * 2023-08-01: :class:`ManagedClustersOperations` + * 2023-08-02-preview: :class:`ManagedClustersOperations` + * 2023-09-01: :class:`ManagedClustersOperations` + * 2023-09-02-preview: :class:`ManagedClustersOperations` + * 2023-10-01: :class:`ManagedClustersOperations` + * 2023-10-02-preview: :class:`ManagedClustersOperations` + * 2023-11-01: :class:`ManagedClustersOperations` + * 2023-11-02-preview: :class:`ManagedClustersOperations` + * 2024-01-01: :class:`ManagedClustersOperations` + * 2024-01-02-preview: :class:`ManagedClustersOperations` + * 2024-02-01: :class:`ManagedClustersOperations` + * 2024-02-02-preview: :class:`ManagedClustersOperations` + * 2024-03-02-preview: :class:`ManagedClustersOperations` + * 2024-04-02-preview: :class:`ManagedClustersOperations` + * 2024-05-01: :class:`ManagedClustersOperations` + * 2024-05-02-preview: :class:`ManagedClustersOperations` + * 2024-06-02-preview: :class:`ManagedClustersOperations` + * 2024-07-01: :class:`ManagedClustersOperations` + """ + api_version = self._get_api_version('managed_clusters') + if api_version == 
'2018-03-31': + from .v2018_03_31.operations import ManagedClustersOperations as OperationClass + elif api_version == '2018-08-01-preview': + from .v2018_08_01_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-02-01': + from .v2019_02_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-04-01': + from .v2019_04_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-06-01': + from .v2019_06_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-08-01': + from .v2019_08_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-10-01': + from .v2019_10_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-11-01': + from .v2019_11_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-01-01': + from .v2020_01_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-02-01': + from .v2020_02_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-03-01': + from .v2020_03_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-04-01': + from .v2020_04_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-06-01': + from .v2020_06_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-07-01': + from .v2020_07_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-09-01': + from .v2020_09_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-11-01': + from .v2020_11_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-12-01': + from .v2020_12_01.operations import ManagedClustersOperations as OperationClass 
+ elif api_version == '2021-02-01': + from .v2021_02_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-03-01': + from .v2021_03_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-05-01': + from .v2021_05_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-07-01': + from .v2021_07_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-08-01': + from .v2021_08_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-09-01': + from .v2021_09_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-10-01': + from .v2021_10_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-01-01': + from .v2022_01_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-01-02-preview': + from .v2022_01_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-02-01': + from .v2022_02_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-02-02-preview': + from .v2022_02_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-04-01': + from .v2022_04_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == 
'2022-05-02-preview': + from .v2022_05_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-06-01': + from .v2022_06_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-09-01': + from .v2022_09_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-01-01': + from .v2023_01_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-02-01': + from .v2023_02_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import 
ManagedClustersOperations as OperationClass + elif api_version == '2023-03-01': + from .v2023_03_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-04-01': + from .v2023_04_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-06-01': + from .v2023_06_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-07-01': + from .v2023_07_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-08-01': + from .v2023_08_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-10-02-preview': + from 
.v2023_10_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import ManagedClustersOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'managed_clusters'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def open_shift_managed_clusters(self): + """Instance depends 
on the API version: + + * 2018-09-30-preview: :class:`OpenShiftManagedClustersOperations` + * 2019-04-30: :class:`OpenShiftManagedClustersOperations` + * 2019-09-30-preview: :class:`OpenShiftManagedClustersOperations` + * 2019-10-27-preview: :class:`OpenShiftManagedClustersOperations` + """ + api_version = self._get_api_version('open_shift_managed_clusters') + if api_version == '2018-09-30-preview': + from .v2018_09_30_preview.operations import OpenShiftManagedClustersOperations as OperationClass + elif api_version == '2019-04-30': + from .v2019_04_30.operations import OpenShiftManagedClustersOperations as OperationClass + elif api_version == '2019-09-30-preview': + from .v2019_09_30_preview.operations import OpenShiftManagedClustersOperations as OperationClass + elif api_version == '2019-10-27-preview': + from .v2019_10_27_preview.operations import OpenShiftManagedClustersOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'open_shift_managed_clusters'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def operation_status_result(self): + """Instance depends on the API version: + + * 2023-10-02-preview: :class:`OperationStatusResultOperations` + * 2023-11-02-preview: :class:`OperationStatusResultOperations` + * 2024-01-02-preview: :class:`OperationStatusResultOperations` + * 2024-02-02-preview: :class:`OperationStatusResultOperations` + * 2024-03-02-preview: :class:`OperationStatusResultOperations` + * 2024-04-02-preview: :class:`OperationStatusResultOperations` + * 2024-05-02-preview: :class:`OperationStatusResultOperations` + * 2024-06-02-preview: :class:`OperationStatusResultOperations` + """ + api_version = self._get_api_version('operation_status_result') + if api_version == '2023-10-02-preview': + from 
.v2023_10_02_preview.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import OperationStatusResultOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'operation_status_result'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def operations(self): + """Instance depends on the API version: + + * 2018-03-31: :class:`Operations` + * 2018-08-01-preview: :class:`Operations` + * 2019-02-01: :class:`Operations` + * 2019-04-01: :class:`Operations` + * 2019-06-01: :class:`Operations` + * 2019-08-01: :class:`Operations` + * 2019-10-01: :class:`Operations` + * 2019-11-01: :class:`Operations` + * 2020-01-01: :class:`Operations` + * 2020-02-01: :class:`Operations` + * 2020-03-01: :class:`Operations` + * 2020-04-01: :class:`Operations` + * 2020-06-01: :class:`Operations` + * 2020-07-01: :class:`Operations` + * 2020-09-01: 
:class:`Operations` + * 2020-11-01: :class:`Operations` + * 2020-12-01: :class:`Operations` + * 2021-02-01: :class:`Operations` + * 2021-03-01: :class:`Operations` + * 2021-05-01: :class:`Operations` + * 2021-07-01: :class:`Operations` + * 2021-08-01: :class:`Operations` + * 2021-09-01: :class:`Operations` + * 2021-10-01: :class:`Operations` + * 2021-11-01-preview: :class:`Operations` + * 2022-01-01: :class:`Operations` + * 2022-01-02-preview: :class:`Operations` + * 2022-02-01: :class:`Operations` + * 2022-02-02-preview: :class:`Operations` + * 2022-03-01: :class:`Operations` + * 2022-03-02-preview: :class:`Operations` + * 2022-04-01: :class:`Operations` + * 2022-04-02-preview: :class:`Operations` + * 2022-05-02-preview: :class:`Operations` + * 2022-06-01: :class:`Operations` + * 2022-06-02-preview: :class:`Operations` + * 2022-07-01: :class:`Operations` + * 2022-07-02-preview: :class:`Operations` + * 2022-08-02-preview: :class:`Operations` + * 2022-08-03-preview: :class:`Operations` + * 2022-09-01: :class:`Operations` + * 2022-09-02-preview: :class:`Operations` + * 2022-10-02-preview: :class:`Operations` + * 2022-11-01: :class:`Operations` + * 2022-11-02-preview: :class:`Operations` + * 2023-01-01: :class:`Operations` + * 2023-01-02-preview: :class:`Operations` + * 2023-02-01: :class:`Operations` + * 2023-02-02-preview: :class:`Operations` + * 2023-03-01: :class:`Operations` + * 2023-03-02-preview: :class:`Operations` + * 2023-04-01: :class:`Operations` + * 2023-04-02-preview: :class:`Operations` + * 2023-05-01: :class:`Operations` + * 2023-05-02-preview: :class:`Operations` + * 2023-06-01: :class:`Operations` + * 2023-06-02-preview: :class:`Operations` + * 2023-07-01: :class:`Operations` + * 2023-07-02-preview: :class:`Operations` + * 2023-08-01: :class:`Operations` + * 2023-08-02-preview: :class:`Operations` + * 2023-09-01: :class:`Operations` + * 2023-09-02-preview: :class:`Operations` + * 2023-10-01: :class:`Operations` + * 2023-10-02-preview: 
:class:`Operations` + * 2023-11-01: :class:`Operations` + * 2023-11-02-preview: :class:`Operations` + * 2024-01-01: :class:`Operations` + * 2024-01-02-preview: :class:`Operations` + * 2024-02-01: :class:`Operations` + * 2024-02-02-preview: :class:`Operations` + * 2024-03-02-preview: :class:`Operations` + * 2024-04-02-preview: :class:`Operations` + * 2024-05-01: :class:`Operations` + * 2024-05-02-preview: :class:`Operations` + * 2024-06-02-preview: :class:`Operations` + * 2024-07-01: :class:`Operations` + """ + api_version = self._get_api_version('operations') + if api_version == '2018-03-31': + from .v2018_03_31.operations import Operations as OperationClass + elif api_version == '2018-08-01-preview': + from .v2018_08_01_preview.operations import Operations as OperationClass + elif api_version == '2019-02-01': + from .v2019_02_01.operations import Operations as OperationClass + elif api_version == '2019-04-01': + from .v2019_04_01.operations import Operations as OperationClass + elif api_version == '2019-06-01': + from .v2019_06_01.operations import Operations as OperationClass + elif api_version == '2019-08-01': + from .v2019_08_01.operations import Operations as OperationClass + elif api_version == '2019-10-01': + from .v2019_10_01.operations import Operations as OperationClass + elif api_version == '2019-11-01': + from .v2019_11_01.operations import Operations as OperationClass + elif api_version == '2020-01-01': + from .v2020_01_01.operations import Operations as OperationClass + elif api_version == '2020-02-01': + from .v2020_02_01.operations import Operations as OperationClass + elif api_version == '2020-03-01': + from .v2020_03_01.operations import Operations as OperationClass + elif api_version == '2020-04-01': + from .v2020_04_01.operations import Operations as OperationClass + elif api_version == '2020-06-01': + from .v2020_06_01.operations import Operations as OperationClass + elif api_version == '2020-07-01': + from .v2020_07_01.operations import 
Operations as OperationClass + elif api_version == '2020-09-01': + from .v2020_09_01.operations import Operations as OperationClass + elif api_version == '2020-11-01': + from .v2020_11_01.operations import Operations as OperationClass + elif api_version == '2020-12-01': + from .v2020_12_01.operations import Operations as OperationClass + elif api_version == '2021-02-01': + from .v2021_02_01.operations import Operations as OperationClass + elif api_version == '2021-03-01': + from .v2021_03_01.operations import Operations as OperationClass + elif api_version == '2021-05-01': + from .v2021_05_01.operations import Operations as OperationClass + elif api_version == '2021-07-01': + from .v2021_07_01.operations import Operations as OperationClass + elif api_version == '2021-08-01': + from .v2021_08_01.operations import Operations as OperationClass + elif api_version == '2021-09-01': + from .v2021_09_01.operations import Operations as OperationClass + elif api_version == '2021-10-01': + from .v2021_10_01.operations import Operations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import Operations as OperationClass + elif api_version == '2022-01-01': + from .v2022_01_01.operations import Operations as OperationClass + elif api_version == '2022-01-02-preview': + from .v2022_01_02_preview.operations import Operations as OperationClass + elif api_version == '2022-02-01': + from .v2022_02_01.operations import Operations as OperationClass + elif api_version == '2022-02-02-preview': + from .v2022_02_02_preview.operations import Operations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import Operations as OperationClass + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview.operations import Operations as OperationClass + elif api_version == '2022-04-01': + from .v2022_04_01.operations import Operations as OperationClass + elif api_version == '2022-04-02-preview': + from 
.v2022_04_02_preview.operations import Operations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import Operations as OperationClass + elif api_version == '2022-06-01': + from .v2022_06_01.operations import Operations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import Operations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import Operations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import Operations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations import Operations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import Operations as OperationClass + elif api_version == '2022-09-01': + from .v2022_09_01.operations import Operations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import Operations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import Operations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import Operations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import Operations as OperationClass + elif api_version == '2023-01-01': + from .v2023_01_01.operations import Operations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import Operations as OperationClass + elif api_version == '2023-02-01': + from .v2023_02_01.operations import Operations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import Operations as OperationClass + elif api_version == '2023-03-01': + from .v2023_03_01.operations import Operations as OperationClass + elif api_version 
== '2023-03-02-preview': + from .v2023_03_02_preview.operations import Operations as OperationClass + elif api_version == '2023-04-01': + from .v2023_04_01.operations import Operations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import Operations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import Operations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import Operations as OperationClass + elif api_version == '2023-06-01': + from .v2023_06_01.operations import Operations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import Operations as OperationClass + elif api_version == '2023-07-01': + from .v2023_07_01.operations import Operations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import Operations as OperationClass + elif api_version == '2023-08-01': + from .v2023_08_01.operations import Operations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import Operations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import Operations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import Operations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import Operations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import Operations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import Operations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import Operations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import Operations as OperationClass + elif api_version 
== '2024-01-02-preview': + from .v2024_01_02_preview.operations import Operations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import Operations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import Operations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import Operations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import Operations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import Operations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import Operations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import Operations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import Operations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'operations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_endpoint_connections(self): + """Instance depends on the API version: + + * 2020-06-01: :class:`PrivateEndpointConnectionsOperations` + * 2020-07-01: :class:`PrivateEndpointConnectionsOperations` + * 2020-09-01: :class:`PrivateEndpointConnectionsOperations` + * 2020-11-01: :class:`PrivateEndpointConnectionsOperations` + * 2020-12-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-02-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-03-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-05-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-07-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-08-01: 
:class:`PrivateEndpointConnectionsOperations` + * 2021-09-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-10-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-11-01-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-01-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-01-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-02-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-02-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-03-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-03-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-04-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-04-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-05-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-06-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-06-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-07-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-07-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-08-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-08-03-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-09-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-09-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-10-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-11-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-11-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-01-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-01-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-02-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-02-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-03-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-03-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-04-01: 
:class:`PrivateEndpointConnectionsOperations` + * 2023-04-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-05-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-05-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-06-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-06-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-07-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-07-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-08-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-08-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-09-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-09-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-10-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-10-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-11-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-11-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-01-01: :class:`PrivateEndpointConnectionsOperations` + * 2024-01-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-02-01: :class:`PrivateEndpointConnectionsOperations` + * 2024-02-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-03-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-04-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-05-01: :class:`PrivateEndpointConnectionsOperations` + * 2024-05-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-06-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-07-01: :class:`PrivateEndpointConnectionsOperations` + """ + api_version = self._get_api_version('private_endpoint_connections') + if api_version == '2020-06-01': + from .v2020_06_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2020-07-01': + from .v2020_07_01.operations import 
PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2020-09-01': + from .v2020_09_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2020-11-01': + from .v2020_11_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2020-12-01': + from .v2020_12_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-02-01': + from .v2021_02_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-03-01': + from .v2021_03_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-05-01': + from .v2021_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-07-01': + from .v2021_07_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-08-01': + from .v2021_08_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-09-01': + from .v2021_09_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-10-01': + from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-01-01': + from .v2022_01_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-01-02-preview': + from .v2022_01_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-02-01': + from .v2022_02_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-02-02-preview': + from .v2022_02_02_preview.operations import 
PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-04-01': + from .v2022_04_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-06-01': + from .v2022_06_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-09-01': + from .v2022_09_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import PrivateEndpointConnectionsOperations as 
OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-01-01': + from .v2023_01_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-02-01': + from .v2023_02_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-03-01': + from .v2023_03_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-04-01': + from .v2023_04_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-06-01': + from .v2023_06_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-07-01': + from .v2023_07_01.operations 
import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-08-01': + from .v2023_08_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + 
elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import PrivateEndpointConnectionsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_link_resources(self): + """Instance depends on the API version: + + * 2020-09-01: :class:`PrivateLinkResourcesOperations` + * 2020-11-01: :class:`PrivateLinkResourcesOperations` + * 2020-12-01: :class:`PrivateLinkResourcesOperations` + * 2021-02-01: :class:`PrivateLinkResourcesOperations` + * 2021-03-01: :class:`PrivateLinkResourcesOperations` + * 2021-05-01: :class:`PrivateLinkResourcesOperations` + * 2021-07-01: :class:`PrivateLinkResourcesOperations` + * 2021-08-01: :class:`PrivateLinkResourcesOperations` + * 2021-09-01: :class:`PrivateLinkResourcesOperations` + * 2021-10-01: :class:`PrivateLinkResourcesOperations` + * 2021-11-01-preview: :class:`PrivateLinkResourcesOperations` + * 2022-01-01: :class:`PrivateLinkResourcesOperations` + * 2022-01-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-02-01: :class:`PrivateLinkResourcesOperations` + * 2022-02-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-03-01: 
:class:`PrivateLinkResourcesOperations` + * 2022-03-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-04-01: :class:`PrivateLinkResourcesOperations` + * 2022-04-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-05-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-06-01: :class:`PrivateLinkResourcesOperations` + * 2022-06-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-07-01: :class:`PrivateLinkResourcesOperations` + * 2022-07-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-08-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-08-03-preview: :class:`PrivateLinkResourcesOperations` + * 2022-09-01: :class:`PrivateLinkResourcesOperations` + * 2022-09-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-10-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-11-01: :class:`PrivateLinkResourcesOperations` + * 2022-11-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-01-01: :class:`PrivateLinkResourcesOperations` + * 2023-01-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-02-01: :class:`PrivateLinkResourcesOperations` + * 2023-02-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-03-01: :class:`PrivateLinkResourcesOperations` + * 2023-03-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-04-01: :class:`PrivateLinkResourcesOperations` + * 2023-04-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-05-01: :class:`PrivateLinkResourcesOperations` + * 2023-05-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-06-01: :class:`PrivateLinkResourcesOperations` + * 2023-06-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-07-01: :class:`PrivateLinkResourcesOperations` + * 2023-07-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-08-01: :class:`PrivateLinkResourcesOperations` + * 2023-08-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-09-01: :class:`PrivateLinkResourcesOperations` + * 2023-09-02-preview: 
:class:`PrivateLinkResourcesOperations` + * 2023-10-01: :class:`PrivateLinkResourcesOperations` + * 2023-10-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-11-01: :class:`PrivateLinkResourcesOperations` + * 2023-11-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-01-01: :class:`PrivateLinkResourcesOperations` + * 2024-01-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-02-01: :class:`PrivateLinkResourcesOperations` + * 2024-02-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-03-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-04-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-05-01: :class:`PrivateLinkResourcesOperations` + * 2024-05-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-06-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-07-01: :class:`PrivateLinkResourcesOperations` + """ + api_version = self._get_api_version('private_link_resources') + if api_version == '2020-09-01': + from .v2020_09_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2020-11-01': + from .v2020_11_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2020-12-01': + from .v2020_12_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-02-01': + from .v2021_02_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-03-01': + from .v2021_03_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-05-01': + from .v2021_05_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-07-01': + from .v2021_07_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-08-01': + from .v2021_08_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-09-01': + from 
.v2021_09_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-10-01': + from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-01-01': + from .v2022_01_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-01-02-preview': + from .v2022_01_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-02-01': + from .v2022_02_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-02-02-preview': + from .v2022_02_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-04-01': + from .v2022_04_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-06-01': + from .v2022_06_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations 
import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-09-01': + from .v2022_09_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-01-01': + from .v2023_01_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-02-01': + from .v2023_02_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-03-01': + from .v2023_03_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-04-01': + from .v2023_04_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import 
PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-06-01': + from .v2023_06_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-07-01': + from .v2023_07_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-08-01': + from .v2023_08_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import PrivateLinkResourcesOperations as 
OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import PrivateLinkResourcesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def resolve_private_link_service_id(self): + """Instance depends on the API version: + + * 2020-09-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2020-11-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2020-12-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-02-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-03-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-05-01: 
:class:`ResolvePrivateLinkServiceIdOperations` + * 2021-07-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-08-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-09-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-10-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-11-01-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-01-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-02-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-03-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-04-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-06-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-07-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-07-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-08-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-08-03-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-09-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-09-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-10-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-11-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-11-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-01-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-02-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 
2023-03-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-04-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-05-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-06-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-07-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-07-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-08-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-08-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-09-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-09-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-10-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-10-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-11-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-11-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-01-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-02-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-05-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-07-01: :class:`ResolvePrivateLinkServiceIdOperations` + """ + api_version = self._get_api_version('resolve_private_link_service_id') + if api_version == '2020-09-01': + from 
.v2020_09_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2020-11-01': + from .v2020_11_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2020-12-01': + from .v2020_12_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-02-01': + from .v2021_02_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-03-01': + from .v2021_03_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-05-01': + from .v2021_05_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-07-01': + from .v2021_07_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-08-01': + from .v2021_08_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-09-01': + from .v2021_09_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-10-01': + from .v2021_10_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-01-01': + from .v2022_01_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-01-02-preview': + from .v2022_01_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-02-01': + from .v2022_02_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-02-02-preview': + from .v2022_02_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-03-01': + from 
.v2022_03_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-04-01': + from .v2022_04_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-06-01': + from .v2022_06_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-09-01': + from .v2022_09_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import 
ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-01-01': + from .v2023_01_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-02-01': + from .v2023_02_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-03-01': + from .v2023_03_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-04-01': + from .v2023_04_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-06-01': + from .v2023_06_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-07-01': + from .v2023_07_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif 
api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-08-01': + from .v2023_08_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-04-02-preview': + from 
.v2024_04_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'resolve_private_link_service_id'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def snapshots(self): + """Instance depends on the API version: + + * 2021-08-01: :class:`SnapshotsOperations` + * 2021-09-01: :class:`SnapshotsOperations` + * 2021-10-01: :class:`SnapshotsOperations` + * 2021-11-01-preview: :class:`SnapshotsOperations` + * 2022-01-01: :class:`SnapshotsOperations` + * 2022-01-02-preview: :class:`SnapshotsOperations` + * 2022-02-01: :class:`SnapshotsOperations` + * 2022-02-02-preview: :class:`SnapshotsOperations` + * 2022-03-01: :class:`SnapshotsOperations` + * 2022-03-02-preview: :class:`SnapshotsOperations` + * 2022-04-01: :class:`SnapshotsOperations` + * 2022-04-02-preview: :class:`SnapshotsOperations` + * 2022-05-02-preview: :class:`SnapshotsOperations` + * 2022-06-01: :class:`SnapshotsOperations` + * 2022-06-02-preview: :class:`SnapshotsOperations` + * 2022-07-01: :class:`SnapshotsOperations` + * 2022-07-02-preview: :class:`SnapshotsOperations` + * 2022-08-02-preview: :class:`SnapshotsOperations` + * 2022-08-03-preview: :class:`SnapshotsOperations` + * 
2022-09-01: :class:`SnapshotsOperations` + * 2022-09-02-preview: :class:`SnapshotsOperations` + * 2022-10-02-preview: :class:`SnapshotsOperations` + * 2022-11-01: :class:`SnapshotsOperations` + * 2022-11-02-preview: :class:`SnapshotsOperations` + * 2023-01-01: :class:`SnapshotsOperations` + * 2023-01-02-preview: :class:`SnapshotsOperations` + * 2023-02-01: :class:`SnapshotsOperations` + * 2023-02-02-preview: :class:`SnapshotsOperations` + * 2023-03-01: :class:`SnapshotsOperations` + * 2023-03-02-preview: :class:`SnapshotsOperations` + * 2023-04-01: :class:`SnapshotsOperations` + * 2023-04-02-preview: :class:`SnapshotsOperations` + * 2023-05-01: :class:`SnapshotsOperations` + * 2023-05-02-preview: :class:`SnapshotsOperations` + * 2023-06-01: :class:`SnapshotsOperations` + * 2023-06-02-preview: :class:`SnapshotsOperations` + * 2023-07-01: :class:`SnapshotsOperations` + * 2023-07-02-preview: :class:`SnapshotsOperations` + * 2023-08-01: :class:`SnapshotsOperations` + * 2023-08-02-preview: :class:`SnapshotsOperations` + * 2023-09-01: :class:`SnapshotsOperations` + * 2023-09-02-preview: :class:`SnapshotsOperations` + * 2023-10-01: :class:`SnapshotsOperations` + * 2023-10-02-preview: :class:`SnapshotsOperations` + * 2023-11-01: :class:`SnapshotsOperations` + * 2023-11-02-preview: :class:`SnapshotsOperations` + * 2024-01-01: :class:`SnapshotsOperations` + * 2024-01-02-preview: :class:`SnapshotsOperations` + * 2024-02-01: :class:`SnapshotsOperations` + * 2024-02-02-preview: :class:`SnapshotsOperations` + * 2024-03-02-preview: :class:`SnapshotsOperations` + * 2024-04-02-preview: :class:`SnapshotsOperations` + * 2024-05-01: :class:`SnapshotsOperations` + * 2024-05-02-preview: :class:`SnapshotsOperations` + * 2024-06-02-preview: :class:`SnapshotsOperations` + * 2024-07-01: :class:`SnapshotsOperations` + """ + api_version = self._get_api_version('snapshots') + if api_version == '2021-08-01': + from .v2021_08_01.operations import SnapshotsOperations as OperationClass + elif 
api_version == '2021-09-01': + from .v2021_09_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2021-10-01': + from .v2021_10_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-01-01': + from .v2022_01_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-01-02-preview': + from .v2022_01_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-02-01': + from .v2022_02_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-02-02-preview': + from .v2022_02_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-03-02-preview': + from .v2022_03_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-04-01': + from .v2022_04_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-06-01': + from .v2022_06_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations 
import SnapshotsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-09-01': + from .v2022_09_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-01-01': + from .v2023_01_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-02-01': + from .v2023_02_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-03-01': + from .v2023_03_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-04-01': + from .v2023_04_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version 
== '2023-06-01': + from .v2023_06_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-07-01': + from .v2023_07_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-08-01': + from .v2023_08_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import 
SnapshotsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import SnapshotsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'snapshots'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def trusted_access_role_bindings(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-07-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-08-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-08-03-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-09-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-10-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-11-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-01-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-02-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-03-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-05-02-preview: 
:class:`TrustedAccessRoleBindingsOperations` + * 2023-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-07-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-08-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-09-01: :class:`TrustedAccessRoleBindingsOperations` + * 2023-09-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-10-01: :class:`TrustedAccessRoleBindingsOperations` + * 2023-10-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-11-01: :class:`TrustedAccessRoleBindingsOperations` + * 2023-11-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-01-01: :class:`TrustedAccessRoleBindingsOperations` + * 2024-01-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-02-01: :class:`TrustedAccessRoleBindingsOperations` + * 2024-02-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-03-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-05-01: :class:`TrustedAccessRoleBindingsOperations` + * 2024-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-07-01: :class:`TrustedAccessRoleBindingsOperations` + """ + api_version = self._get_api_version('trusted_access_role_bindings') + if api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-08-02-preview': + 
from .v2022_08_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-09-02-preview': + 
from .v2023_09_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import TrustedAccessRoleBindingsOperations 
as OperationClass + else: + raise ValueError("API version {} does not have operation group 'trusted_access_role_bindings'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def trusted_access_roles(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-05-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-06-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-07-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-08-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-08-03-preview: :class:`TrustedAccessRolesOperations` + * 2022-09-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-10-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-11-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-01-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-02-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-03-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-04-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-05-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-06-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-07-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-08-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-09-01: :class:`TrustedAccessRolesOperations` + * 2023-09-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-10-01: :class:`TrustedAccessRolesOperations` + * 2023-10-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-11-01: :class:`TrustedAccessRolesOperations` + * 2023-11-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-01-01: :class:`TrustedAccessRolesOperations` + * 2024-01-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-02-01: 
:class:`TrustedAccessRolesOperations` + * 2024-02-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-03-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-04-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-05-01: :class:`TrustedAccessRolesOperations` + * 2024-05-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-06-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-07-01: :class:`TrustedAccessRolesOperations` + """ + api_version = self._get_api_version('trusted_access_roles') + if api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-05-02-preview': + from .v2022_05_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-06-02-preview': + from .v2022_06_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-07-02-preview': + from .v2022_07_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-08-02-preview': + from .v2022_08_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-08-03-preview': + from .v2022_08_03_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-09-02-preview': + from .v2022_09_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-10-02-preview': + from .v2022_10_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-11-02-preview': + from .v2022_11_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-01-02-preview': + from .v2023_01_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-02-02-preview': + from .v2023_02_02_preview.operations import 
TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-03-02-preview': + from .v2023_03_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-04-02-preview': + from .v2023_04_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-05-02-preview': + from .v2023_05_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-06-02-preview': + from .v2023_06_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-07-02-preview': + from .v2023_07_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-08-02-preview': + from .v2023_08_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-09-01': + from .v2023_09_01.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-09-02-preview': + from .v2023_09_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-10-01': + from .v2023_10_01.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-10-02-preview': + from .v2023_10_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-11-01': + from .v2023_11_01.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-11-02-preview': + from .v2023_11_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-01-01': + from .v2024_01_01.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-01-02-preview': + from .v2024_01_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-02-01': + from .v2024_02_01.operations import 
TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-02-02-preview': + from .v2024_02_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-03-02-preview': + from .v2024_03_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-04-02-preview': + from .v2024_04_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-05-01': + from .v2024_05_01.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-05-02-preview': + from .v2024_05_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-06-02-preview': + from .v2024_06_02_preview.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-07-01': + from .v2024_07_01.operations import TrustedAccessRolesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'trusted_access_roles'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + def close(self): + self._client.close() + def __enter__(self): + self._client.__enter__() + return self + def __exit__(self, *exc_details): + self._client.__exit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_serialization.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_serialization.py new file mode 100644 index 00000000000..59f1fcf71bc --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_serialization.py @@ -0,0 +1,2006 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
# pylint: skip-file
# pyright: reportUnnecessaryTypeIgnoreComment=false

from base64 import b64decode, b64encode
import calendar
import codecs
import datetime
import decimal
import email
from enum import Enum
import json
import logging
import re
import sys
from typing import (
    IO,
    Any,
    AnyStr,
    Callable,
    Dict,
    List,
    Mapping,
    MutableMapping,
    Optional,
    Type,
    TypeVar,
    Union,
    cast,
)
from urllib.parse import quote  # Python 2 fallback removed: file is py3-only (annotations below)
import xml.etree.ElementTree as ET

# Third-party/azure-core imports are guarded so this vendored module can still be
# imported (e.g. for unit testing) in an environment without them; in the SDK
# proper both are always available and the fallbacks are never used.
try:
    import isodate  # type: ignore
except ImportError:  # pragma: no cover
    isodate = None  # type: ignore

try:
    from azure.core.exceptions import DeserializationError, SerializationError
except ImportError:  # pragma: no cover
    class SerializationError(Exception):
        """Fallback when azure-core is unavailable."""

    class DeserializationError(Exception):
        """Fallback when azure-core is unavailable."""

try:
    from azure.core.serialization import NULL as CoreNull
except ImportError:  # pragma: no cover
    CoreNull = object()

_BOM = codecs.BOM_UTF8.decode(encoding="utf-8")

ModelType = TypeVar("ModelType", bound="Model")
JSON = MutableMapping[str, Any]


class RawDeserializer:
    """Decode a raw HTTP payload (str, bytes, or readable stream) according to content type."""

    # Accept "text" because we're open minded people...
    JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")

    # Name used in context
    CONTEXT_NAME = "deserialized_data"

    @classmethod
    def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any:
        """Decode data according to content-type.

        Accept a stream of data as well, but will be load at once in memory for now.

        If no content-type, will return the string version (not bytes, not stream)

        :param data: Input, could be bytes or stream (will be decoded with UTF8) or text
        :type data: str or bytes or IO
        :param str content_type: The content type.
        :returns: parsed JSON object, XML Element, or the text itself for text/* types
        :raises DeserializationError: if the payload cannot be decoded for the given type
        """
        if hasattr(data, "read"):
            # Assume a stream
            data = cast(IO, data).read()

        if isinstance(data, bytes):
            # utf-8-sig transparently strips a UTF-8 BOM if present
            data_as_str = data.decode(encoding="utf-8-sig")
        else:
            # Explain to mypy the correct type.
            data_as_str = cast(str, data)

        # Remove Byte Order Mark if present in string
        data_as_str = data_as_str.lstrip(_BOM)

        if content_type is None:
            return data

        if cls.JSON_REGEXP.match(content_type):
            try:
                return json.loads(data_as_str)
            except ValueError as err:
                raise DeserializationError("JSON is invalid: {}".format(err), err)
        elif "xml" in content_type:
            # FIX: was `"xml" in (content_type or [])` — content_type is proven
            # non-None above, and `"xml" in []` could never be true anyway.
            try:
                return ET.fromstring(data_as_str)  # nosec
            except ET.ParseError as err:
                # It might be because the server has an issue, and returned JSON with
                # content-type XML.... So try a JSON load, and if it's still broken
                # flow the initial (XML) exception.
                def _json_attempt(payload):
                    try:
                        return True, json.loads(payload)
                    except ValueError:
                        return False, None  # Don't care about this one

                success, json_result = _json_attempt(data)
                if success:
                    return json_result
                # Not JSON, not XML: scream and raise the XML exception.
                _LOGGER.critical("Wasn't XML not JSON, failing")
                raise DeserializationError("XML is invalid") from err
        elif content_type.startswith("text/"):
            return data_as_str
        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))

    @classmethod
    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
        """Deserialize from HTTP response.

        Use bytes and headers to NOT use any requests/aiohttp or whatever
        specific implementation. Headers will be tested for "content-type".

        :param body_bytes: raw response body (str/bytes/stream), may be empty
        :param headers: case-sensitive mapping of response headers
        :returns: deserialized body, or None when the body is empty
        """
        # Try to use content-type from headers if available
        content_type = None
        if "content-type" in headers:
            content_type = headers["content-type"].split(";")[0].strip().lower()
        # Ouch, this server did not declare what it sent...
        # Let's guess it's JSON...
        # Also, since Autorest was considering that an empty body was a valid JSON,
        # need that test as well....
        else:
            content_type = "application/json"

        if body_bytes:
            return cls.deserialize_from_text(body_bytes, content_type)
        return None
_LOGGER = logging.getLogger(__name__)

try:
    _long_type = long  # type: ignore  # Python 2 only; falls through to int on py3
except NameError:
    _long_type = int


class UTC(datetime.tzinfo):
    """Time Zone info for handling UTC"""

    def utcoffset(self, dt):
        """UTC offset for UTC is 0."""
        return datetime.timedelta(0)

    def tzname(self, dt):
        """Timestamp representation."""
        return "Z"

    def dst(self, dt):
        """No daylight saving for UTC."""
        # FIX: previously returned timedelta(hours=1), contradicting this very
        # docstring. Per the datetime.tzinfo contract, a zone with no DST in
        # effect must return a zero offset (compare datetime.timezone.utc).
        return datetime.timedelta(0)


try:
    from datetime import timezone as _FixedOffset  # type: ignore
except ImportError:  # Python 2.7

    class _FixedOffset(datetime.tzinfo):  # type: ignore
        """Fixed offset in minutes east from UTC.
        Copy/pasted from Python doc
        :param datetime.timedelta offset: offset in timedelta format
        """

        def __init__(self, offset):
            self.__offset = offset

        def utcoffset(self, dt):
            return self.__offset

        def tzname(self, dt):
            return str(self.__offset.total_seconds() / 3600)

        def __repr__(self):
            # FIX: the format string had been mangled to "" (angle brackets
            # stripped); restored to the conventional repr form.
            return "<FixedOffset {}>".format(self.tzname(None))

        def dst(self, dt):
            return datetime.timedelta(0)

        def __getinitargs__(self):
            # Support copy/pickle by replaying the constructor argument.
            return (self.__offset,)


try:
    from datetime import timezone

    TZ_UTC = timezone.utc
except ImportError:
    TZ_UTC = UTC()  # type: ignore
None: + self.additional_properties: Optional[Dict[str, Any]] = {} + for k in kwargs: + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node.""" + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to server from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. 
+ + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs) # type: ignore + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[ + [str, Dict[str, Any], Any], Any + ] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs) # type: ignore + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. 
+ client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls: Type[ModelType], + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> ModelType: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param str content_type: JSON by default, set application/xml if XML. 
+ :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + Remove the polymorphic key from the initial data. + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. 
Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 
+ + :param str key: A key string from the generated code + """ + return key.replace("\\.", ".") + + +class Serializer(object): + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]]=None): + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize(self, target_obj, data_type=None, **kwargs): + """Serialize data into a string according to type. + + :param target_obj: The data to be serialized. 
+ :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises: SerializationError if serialization fails. + """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() + try: + attributes = target_obj._attribute_map + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. 
Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. + if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be 
serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + else: + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. 
+ :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :keyword bool skip_quote: Whether to skip quote the serialized result. + Defaults to False. + :rtype: str, list + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get('skip_quote', False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. 
+ :raises: ValueError if data is None + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :param bool required: Whether it's essential that the data not be + empty or None + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is CoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + elif data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, data.__class__) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." 
+            raise SerializationError(msg.format(data, data_type)) from err
+        else:
+            return self._serialize(data, **kwargs)
+
+    @classmethod
+    def _get_custom_serializers(cls, data_type, **kwargs):
+        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+        if custom_serializer:
+            return custom_serializer
+        if kwargs.get("is_xml", False):
+            return cls._xml_basic_types_serializers.get(data_type)
+
+    @classmethod
+    def serialize_basic(cls, data, data_type, **kwargs):
+        """Serialize basic builtin data type.
+        Serializes objects to str, int, float or bool.
+
+        Possible kwargs:
+        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+        - is_xml bool : If set, use xml_basic_types_serializers
+
+        :param data: Object to be serialized.
+        :param str data_type: Type of object in the iterable.
+        """
+        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+        if custom_serializer:
+            return custom_serializer(data)
+        if data_type == "str":
+            return cls.serialize_unicode(data)
+        return eval(data_type)(data)  # nosec
+
+    @classmethod
+    def serialize_unicode(cls, data):
+        """Special handling for serializing unicode strings in Py2.
+        Encode to UTF-8 if unicode, otherwise handle as a str.
+
+        :param data: Object to be serialized.
+        :rtype: str
+        """
+        try:  # If I received an enum, return its value
+            return data.value
+        except AttributeError:
+            pass
+
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                # Don't change it, JSON and XML ElementTree are totally able
+                # to serialize correctly u'' strings
+                return data
+        except NameError:
+            return str(data)
+        else:
+            return str(data)
+
+    def serialize_iter(self, data, iter_type, div=None, **kwargs):
+        """Serialize iterable.
+
+        Supported kwargs:
+        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+          serialization_ctxt['type'] should be same as data_type.
+        - is_xml bool : If set, serialize as XML
+
+        :param list data: Object to be serialized.
+ :param str iter_type: Type of object in the iterable. + :param bool required: Whether the objects in the iterable must + not be None or empty. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + :keyword bool do_quote: Whether to quote the serialized result of each iterable element. + Defaults to False. + :rtype: list, str + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get('do_quote', False): + serialized = [ + '' if s is None else quote(str(s), safe='') + for s + in serialized + ] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def 
serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :param bool required: Whether the objects in the dictionary must + not be None or empty. + :rtype: dict + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + elif obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) + + @staticmethod + def serialize_bytearray(attr, **kwargs): + """Serialize bytearray into base-64 string. + + :param attr: Object to be serialized. 
+ :rtype: str + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): + """Serialize str into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): + """Serialize Decimal object to float. + + :param attr: Object to be serialized. + :rtype: float + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): + """Serialize long (Py2) or int (Py3). + + :param attr: Object to be serialized. + :rtype: int/long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: TypeError if format invalid. 
+ """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError: + raise TypeError("RFC1123 object must be valid Datetime object.") + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: SerializationError if format invalid. + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise SerializationError(msg) from err + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise TypeError(msg) from err + + @staticmethod + def serialize_unix(attr, **kwargs): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. 
+ :rtype: int + :raises: SerializationError if format invalid + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError: + raise TypeError("Unix time object must be valid Datetime object.") + + +def rest_key_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(List[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." 
in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key.""" + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. 
+ + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = 
_extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + else: + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + else: # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not a itertype, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) + return children[0] + + +class Deserializer(object): + """Response object model deserializer. + + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]]=None): + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. 
+ """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + elif isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... 
+ if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + else: + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. 
If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + """ + try: + return self(target_obj, data, content_type=content_type) + except: + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. 
    @staticmethod
    def _unpack_content(raw_data, content_type=None):
        """Extract the correct structure for deserialization.

        If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
        if we can't, raise. Your Pipeline should have a RawDeserializer.

        If not a pipeline response and raw_data is bytes or string, use content-type
        to decode it. If no content-type, try JSON.

        If raw_data is something else, bypass all logic and return it directly.

        :param raw_data: Data to be processed.
        :param content_type: How to parse if raw_data is a string/bytes.
        :raises JSONDecodeError: If JSON is requested and parsing is impossible.
        :raises UnicodeDecodeError: If bytes is not UTF8
        """
        # Assume this is enough to detect a Pipeline Response without importing it
        context = getattr(raw_data, "context", {})
        if context:
            if RawDeserializer.CONTEXT_NAME in context:
                return context[RawDeserializer.CONTEXT_NAME]
            raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")

        # Assume this is enough to recognize universal_http.ClientResponse without importing it
        if hasattr(raw_data, "body"):
            # ClientResponse.text is a method, hence the call.
            return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)

        # Assume this enough to recognize requests.Response without importing it.
        if hasattr(raw_data, "_content_consumed"):
            # requests.Response.text is a property, no call.
            return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)

        if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
            return RawDeserializer.deserialize_from_text(raw_data, content_type)  # type: ignore
        return raw_data
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [k for k, v in response._validation.items() if v.get("readonly")] + const = [k for k, v in response._validation.items() if v.get("constant")] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) + + def deserialize_data(self, data, data_type): + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. 
+ """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + else: + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. 
def deserialize_dict(self, attr, dict_type):
    """Deserialize a dictionary.

    :param dict/list attr: Dictionary to be deserialized. Also accepts
     a list of key, value pairs.
    :param str dict_type: The object type of the items in the dictionary.
    :rtype: dict
    """
    if isinstance(attr, list):
        return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}

    if isinstance(attr, ET.Element):
        # Transform value into {"Key": "value"}
        attr = {el.tag: el.text for el in attr}
    return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}

def deserialize_object(self, attr, **kwargs):
    """Deserialize a generic object.
    This will be handled as a dictionary.

    :param dict attr: Dictionary to be deserialized.
    :rtype: dict
    :raises: TypeError if non-builtin datatype encountered.
    """
    if attr is None:
        return None
    if isinstance(attr, ET.Element):
        # Do no recurse on XML, just return the tree as-is
        return attr
    if isinstance(attr, str):
        return self.deserialize_basic(attr, "str")
    obj_type = type(attr)
    if obj_type in self.basic_types:
        return self.deserialize_basic(attr, self.basic_types[obj_type])
    if obj_type is _long_type:
        return self.deserialize_long(attr)

    if obj_type == dict:
        deserialized = {}
        for key, value in attr.items():
            try:
                deserialized[key] = self.deserialize_object(value, **kwargs)
            except ValueError:
                # Best-effort: undeserializable values become None.
                deserialized[key] = None
        return deserialized

    if obj_type == list:
        deserialized = []
        for obj in attr:
            try:
                deserialized.append(self.deserialize_object(obj, **kwargs))
            except ValueError:
                # Best-effort: undeserializable items are dropped.
                pass
        return deserialized

    error = "Cannot deserialize generic object with type: "
    raise TypeError(error + str(obj_type))

def deserialize_basic(self, attr, data_type):
    """Deserialize basic builtin data type from string.
    Will attempt to convert to str, int, float and bool.
    This function will also accept '1', '0', 'true' and 'false' as
    valid bool values.

    :param str attr: response string to be deserialized.
    :param str data_type: deserialization data type.
    :rtype: str, int, float or bool
    :raises: TypeError if string format is not valid.
    """
    # If we're here, data is supposed to be a basic type.
    # If it's still an XML node, take the text
    if isinstance(attr, ET.Element):
        attr = attr.text
    if not attr:
        if data_type == "str":
            # None or '', node is empty string.
            return ""
        # None or '', node with a strong type is None.
        # Don't try to model "empty bool" or "empty int"
        return None

    if data_type == "bool":
        if attr in [True, False, 1, 0]:
            return bool(attr)
        if isinstance(attr, str):
            if attr.lower() in ["true", "1"]:
                return True
            if attr.lower() in ["false", "0"]:
                return False
        raise TypeError("Invalid boolean value: {}".format(attr))

    if data_type == "str":
        return self.deserialize_unicode(attr)
    # Fixed: was "eval(data_type)(attr)  # nosec" — an avoidable code-execution
    # sink. Only "int" and "float" can reach this point, so dispatch explicitly.
    return {"int": int, "float": float}[data_type](attr)

def deserialize_unicode(data):
    """Return *data* as a string, passing Enum members through untouched.

    :param str data: response string to be deserialized.
    :rtype: str
    """
    # We might be here because we have an enum modeled as string,
    # and we try to deserialize a partial dict with enum inside
    if isinstance(data, Enum):
        return data
    # The original carried a Python 2 "unicode" fallback (try/except
    # NameError); this file is Python 3 only, so str() is equivalent.
    return str(data)

def deserialize_enum(data, enum_obj):
    """Deserialize string into enum object.

    If the string is not a valid enum value it will be returned as-is
    and a warning will be logged.

    :param str data: Response string to be deserialized. If this value is
     None or invalid it will be returned as-is.
    :param Enum enum_obj: Enum object to deserialize to.
    :rtype: Enum
    """
    if isinstance(data, enum_obj) or data is None:
        return data
    if isinstance(data, Enum):
        data = data.value
    if isinstance(data, int):
        # Workaround. We might consider remove it in the future.
        try:
            return list(enum_obj.__members__.values())[data]
        except IndexError:
            error = "{!r} is not a valid index for enum {!r}"
            raise DeserializationError(error.format(data, enum_obj))
    try:
        return enum_obj(str(data))
    except ValueError:
        # Case-insensitive fallback over the enum's values.
        for enum_value in enum_obj:
            if enum_value.value.lower() == str(data).lower():
                return enum_value
        # We don't fail anymore for unknown value, we deserialize as a string
        _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
        return deserialize_unicode(data)
def deserialize_bytearray(attr):
    """Decode a base64 string (or XML text node) into a bytearray.

    :param str attr: response string to be deserialized.
    :rtype: bytearray
    :raises: TypeError if string format invalid.
    """
    source = attr.text if isinstance(attr, ET.Element) else attr
    return bytearray(b64decode(source))  # type: ignore

def deserialize_base64(attr):
    """Decode a possibly URL-safe, possibly unpadded base64 string into bytes.

    :param str attr: response string to be deserialized.
    :rtype: bytes
    :raises: TypeError if string format invalid.
    """
    text = attr.text if isinstance(attr, ET.Element) else attr
    # Restore padding to a multiple of 4, then map the URL-safe alphabet
    # back to standard base64 before decoding.
    text = text + "=" * (-len(text) % 4)  # type: ignore
    return b64decode(text.replace("-", "+").replace("_", "/"))

def deserialize_decimal(attr):
    """Parse a string (or XML text node) into a Decimal.

    :param str attr: response string to be deserialized.
    :rtype: Decimal
    :raises: DeserializationError if string format invalid.
    """
    value = attr.text if isinstance(attr, ET.Element) else attr
    try:
        return decimal.Decimal(str(value))  # type: ignore
    except decimal.DecimalException as err:
        raise DeserializationError("Invalid decimal {}".format(value)) from err
+ :rtype: long or int + :raises: ValueError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :rtype: TimeDelta + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + else: + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :rtype: Date + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=0, defaultday=0) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. 
    @staticmethod
    def deserialize_rfc(attr):
        """Deserialize RFC-1123 formatted string into Datetime object.

        :param str attr: response string to be deserialized.
        :rtype: Datetime
        :raises: DeserializationError if string format invalid.
        """
        if isinstance(attr, ET.Element):
            attr = attr.text
        try:
            # parsedate_tz yields a 10-tuple; index 9 is the UTC offset in seconds.
            parsed_date = email.utils.parsedate_tz(attr)  # type: ignore
            date_obj = datetime.datetime(
                *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
            )
            # NOTE(review): tzinfo is always set by the constructor above, so this
            # branch looks unreachable — confirm before relying on it.
            if not date_obj.tzinfo:
                date_obj = date_obj.astimezone(tz=TZ_UTC)
        except ValueError as err:
            msg = "Cannot deserialize to rfc datetime object."
            raise DeserializationError(msg) from err
        else:
            return date_obj

    @staticmethod
    def deserialize_iso(attr):
        """Deserialize ISO-8601 formatted string into Datetime object.

        :param str attr: response string to be deserialized.
        :rtype: Datetime
        :raises: DeserializationError if string format invalid.
        """
        if isinstance(attr, ET.Element):
            attr = attr.text
        try:
            attr = attr.upper()  # type: ignore
            match = Deserializer.valid_date.match(attr)
            if not match:
                raise ValueError("Invalid datetime string: " + attr)

            # Truncate fractional seconds to 6 digits (microsecond precision),
            # since datetime cannot represent more.
            check_decimal = attr.split(".")
            if len(check_decimal) > 1:
                decimal_str = ""
                for digit in check_decimal[1]:
                    if digit.isdigit():
                        decimal_str += digit
                    else:
                        break
                if len(decimal_str) > 6:
                    attr = attr.replace(decimal_str, decimal_str[0:6])

            date_obj = isodate.parse_datetime(attr)
            test_utc = date_obj.utctimetuple()
            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
                raise OverflowError("Hit max or min date")
        except (ValueError, OverflowError, AttributeError) as err:
            msg = "Cannot deserialize datetime object."
            raise DeserializationError(msg) from err
        else:
            return date_obj
    @staticmethod
    def deserialize_unix(attr):
        """Deserialize a Unix timestamp (seconds since epoch) into a
        timezone-aware Datetime object.

        :param int attr: Unix timestamp to be deserialized.
        :rtype: Datetime
        :raises: DeserializationError if format invalid
        """
        if isinstance(attr, ET.Element):
            attr = int(attr.text)  # type: ignore
        try:
            attr = int(attr)
            date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
        except ValueError as err:
            msg = "Cannot deserialize to unix datetime object."
            raise DeserializationError(msg) from err
        else:
            return date_obj
class ContainerServiceClientConfiguration:
    """Configuration for ContainerServiceClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription. The value must be an UUID. Required.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        # Default to the public-cloud ARM scope; callers may override.
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'azure-mgmt-containerservice/{}'.format(VERSION))
        self.polling_interval = kwargs.get("polling_interval", 30)
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # Each policy can be overridden via kwargs; otherwise the azure-core
        # default (async variants where applicable) is installed.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # ARM challenge auth is only installed when the caller did not supply
        # an explicit authentication policy.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
class _SDKClient(object):
    def __init__(self, *args, **kwargs):
        """This is a fake class to support the current implementation of
        MultiApiClientMixin. Will be removed in the final version of the
        multiapi azure-core based client.
        """
        pass
    # Default api-version used when neither an explicit api_version nor a
    # profile entry applies.
    DEFAULT_API_VERSION = '2024-07-01'
    _PROFILE_TAG = "azure.mgmt.containerservice.ContainerServiceClient"
    # Pins operation groups that only exist in older or preview api-versions.
    LATEST_PROFILE = ProfileDefinition({
        _PROFILE_TAG: {
            None: DEFAULT_API_VERSION,
            'container_services': '2019-04-01',
            'fleet_members': '2022-09-02-preview',
            'fleets': '2022-09-02-preview',
            'load_balancers': '2024-06-02-preview',
            'managed_cluster_snapshots': '2024-06-02-preview',
            'open_shift_managed_clusters': '2019-04-30',
            'operation_status_result': '2024-06-02-preview',
        }},
        _PROFILE_TAG + " latest"
    )

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        api_version: Optional[str] = None,
        base_url: str = "https://management.azure.com",
        profile: KnownProfiles = KnownProfiles.default,
        **kwargs: Any
    ) -> None:
        if api_version:
            kwargs.setdefault('api_version', api_version)
        self._config = ContainerServiceClientConfiguration(credential, subscription_id, **kwargs)
        _policies = kwargs.pop("policies", None)
        if _policies is None:
            # Standard ARM pipeline; order matters (auth before hooks/logging).
            _policies = [
                policies.RequestIdPolicy(**kwargs),
                self._config.headers_policy,
                self._config.user_agent_policy,
                self._config.proxy_policy,
                policies.ContentDecodePolicy(**kwargs),
                AsyncARMAutoResourceProviderRegistrationPolicy(),
                self._config.redirect_policy,
                self._config.retry_policy,
                self._config.authentication_policy,
                self._config.custom_hook_policy,
                self._config.logging_policy,
                policies.DistributedTracingPolicy(**kwargs),
                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
                self._config.http_logging_policy,
            ]
        self._client = AsyncARMPipelineClient(base_url=base_url, policies=_policies, **kwargs)
        super(ContainerServiceClient, self).__init__(
            api_version=api_version,
            profile=profile
        )

    @classmethod
    def _models_dict(cls, api_version):
        # Expose only the model *classes* from the api-version's models module.
        return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
:mod:`v2021_03_01.models` + * 2021-05-01: :mod:`v2021_05_01.models` + * 2021-07-01: :mod:`v2021_07_01.models` + * 2021-08-01: :mod:`v2021_08_01.models` + * 2021-09-01: :mod:`v2021_09_01.models` + * 2021-10-01: :mod:`v2021_10_01.models` + * 2021-11-01-preview: :mod:`v2021_11_01_preview.models` + * 2022-01-01: :mod:`v2022_01_01.models` + * 2022-01-02-preview: :mod:`v2022_01_02_preview.models` + * 2022-02-01: :mod:`v2022_02_01.models` + * 2022-02-02-preview: :mod:`v2022_02_02_preview.models` + * 2022-03-01: :mod:`v2022_03_01.models` + * 2022-03-02-preview: :mod:`v2022_03_02_preview.models` + * 2022-04-01: :mod:`v2022_04_01.models` + * 2022-04-02-preview: :mod:`v2022_04_02_preview.models` + * 2022-05-02-preview: :mod:`v2022_05_02_preview.models` + * 2022-06-01: :mod:`v2022_06_01.models` + * 2022-06-02-preview: :mod:`v2022_06_02_preview.models` + * 2022-07-01: :mod:`v2022_07_01.models` + * 2022-07-02-preview: :mod:`v2022_07_02_preview.models` + * 2022-08-02-preview: :mod:`v2022_08_02_preview.models` + * 2022-08-03-preview: :mod:`v2022_08_03_preview.models` + * 2022-09-01: :mod:`v2022_09_01.models` + * 2022-09-02-preview: :mod:`v2022_09_02_preview.models` + * 2022-10-02-preview: :mod:`v2022_10_02_preview.models` + * 2022-11-01: :mod:`v2022_11_01.models` + * 2022-11-02-preview: :mod:`v2022_11_02_preview.models` + * 2023-01-01: :mod:`v2023_01_01.models` + * 2023-01-02-preview: :mod:`v2023_01_02_preview.models` + * 2023-02-01: :mod:`v2023_02_01.models` + * 2023-02-02-preview: :mod:`v2023_02_02_preview.models` + * 2023-03-01: :mod:`v2023_03_01.models` + * 2023-03-02-preview: :mod:`v2023_03_02_preview.models` + * 2023-04-01: :mod:`v2023_04_01.models` + * 2023-04-02-preview: :mod:`v2023_04_02_preview.models` + * 2023-05-01: :mod:`v2023_05_01.models` + * 2023-05-02-preview: :mod:`v2023_05_02_preview.models` + * 2023-06-01: :mod:`v2023_06_01.models` + * 2023-06-02-preview: :mod:`v2023_06_02_preview.models` + * 2023-07-01: :mod:`v2023_07_01.models` + * 2023-07-02-preview: 
:mod:`v2023_07_02_preview.models` + * 2023-08-01: :mod:`v2023_08_01.models` + * 2023-08-02-preview: :mod:`v2023_08_02_preview.models` + * 2023-09-01: :mod:`v2023_09_01.models` + * 2023-09-02-preview: :mod:`v2023_09_02_preview.models` + * 2023-10-01: :mod:`v2023_10_01.models` + * 2023-10-02-preview: :mod:`v2023_10_02_preview.models` + * 2023-11-01: :mod:`v2023_11_01.models` + * 2023-11-02-preview: :mod:`v2023_11_02_preview.models` + * 2024-01-01: :mod:`v2024_01_01.models` + * 2024-01-02-preview: :mod:`v2024_01_02_preview.models` + * 2024-02-01: :mod:`v2024_02_01.models` + * 2024-02-02-preview: :mod:`v2024_02_02_preview.models` + * 2024-03-02-preview: :mod:`v2024_03_02_preview.models` + * 2024-04-02-preview: :mod:`v2024_04_02_preview.models` + * 2024-05-01: :mod:`v2024_05_01.models` + * 2024-05-02-preview: :mod:`v2024_05_02_preview.models` + * 2024-06-02-preview: :mod:`v2024_06_02_preview.models` + * 2024-07-01: :mod:`v2024_07_01.models` + """ + if api_version == '2017-07-01': + from ..v2017_07_01 import models + return models + elif api_version == '2018-03-31': + from ..v2018_03_31 import models + return models + elif api_version == '2018-08-01-preview': + from ..v2018_08_01_preview import models + return models + elif api_version == '2018-09-30-preview': + from ..v2018_09_30_preview import models + return models + elif api_version == '2019-02-01': + from ..v2019_02_01 import models + return models + elif api_version == '2019-04-01': + from ..v2019_04_01 import models + return models + elif api_version == '2019-04-30': + from ..v2019_04_30 import models + return models + elif api_version == '2019-06-01': + from ..v2019_06_01 import models + return models + elif api_version == '2019-08-01': + from ..v2019_08_01 import models + return models + elif api_version == '2019-09-30-preview': + from ..v2019_09_30_preview import models + return models + elif api_version == '2019-10-01': + from ..v2019_10_01 import models + return models + elif api_version == 
'2019-10-27-preview': + from ..v2019_10_27_preview import models + return models + elif api_version == '2019-11-01': + from ..v2019_11_01 import models + return models + elif api_version == '2020-01-01': + from ..v2020_01_01 import models + return models + elif api_version == '2020-02-01': + from ..v2020_02_01 import models + return models + elif api_version == '2020-03-01': + from ..v2020_03_01 import models + return models + elif api_version == '2020-04-01': + from ..v2020_04_01 import models + return models + elif api_version == '2020-06-01': + from ..v2020_06_01 import models + return models + elif api_version == '2020-07-01': + from ..v2020_07_01 import models + return models + elif api_version == '2020-09-01': + from ..v2020_09_01 import models + return models + elif api_version == '2020-11-01': + from ..v2020_11_01 import models + return models + elif api_version == '2020-12-01': + from ..v2020_12_01 import models + return models + elif api_version == '2021-02-01': + from ..v2021_02_01 import models + return models + elif api_version == '2021-03-01': + from ..v2021_03_01 import models + return models + elif api_version == '2021-05-01': + from ..v2021_05_01 import models + return models + elif api_version == '2021-07-01': + from ..v2021_07_01 import models + return models + elif api_version == '2021-08-01': + from ..v2021_08_01 import models + return models + elif api_version == '2021-09-01': + from ..v2021_09_01 import models + return models + elif api_version == '2021-10-01': + from ..v2021_10_01 import models + return models + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview import models + return models + elif api_version == '2022-01-01': + from ..v2022_01_01 import models + return models + elif api_version == '2022-01-02-preview': + from ..v2022_01_02_preview import models + return models + elif api_version == '2022-02-01': + from ..v2022_02_01 import models + return models + elif api_version == '2022-02-02-preview': + from 
..v2022_02_02_preview import models + return models + elif api_version == '2022-03-01': + from ..v2022_03_01 import models + return models + elif api_version == '2022-03-02-preview': + from ..v2022_03_02_preview import models + return models + elif api_version == '2022-04-01': + from ..v2022_04_01 import models + return models + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview import models + return models + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview import models + return models + elif api_version == '2022-06-01': + from ..v2022_06_01 import models + return models + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview import models + return models + elif api_version == '2022-07-01': + from ..v2022_07_01 import models + return models + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview import models + return models + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview import models + return models + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview import models + return models + elif api_version == '2022-09-01': + from ..v2022_09_01 import models + return models + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview import models + return models + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview import models + return models + elif api_version == '2022-11-01': + from ..v2022_11_01 import models + return models + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview import models + return models + elif api_version == '2023-01-01': + from ..v2023_01_01 import models + return models + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview import models + return models + elif api_version == '2023-02-01': + from ..v2023_02_01 import models + return models + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview import models + return models + elif 
api_version == '2023-03-01': + from ..v2023_03_01 import models + return models + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview import models + return models + elif api_version == '2023-04-01': + from ..v2023_04_01 import models + return models + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview import models + return models + elif api_version == '2023-05-01': + from ..v2023_05_01 import models + return models + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview import models + return models + elif api_version == '2023-06-01': + from ..v2023_06_01 import models + return models + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview import models + return models + elif api_version == '2023-07-01': + from ..v2023_07_01 import models + return models + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview import models + return models + elif api_version == '2023-08-01': + from ..v2023_08_01 import models + return models + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview import models + return models + elif api_version == '2023-09-01': + from ..v2023_09_01 import models + return models + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview import models + return models + elif api_version == '2023-10-01': + from ..v2023_10_01 import models + return models + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview import models + return models + elif api_version == '2023-11-01': + from ..v2023_11_01 import models + return models + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview import models + return models + elif api_version == '2024-01-01': + from ..v2024_01_01 import models + return models + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview import models + return models + elif api_version == '2024-02-01': + from ..v2024_02_01 import models + return models + elif api_version 
== '2024-02-02-preview': + from ..v2024_02_02_preview import models + return models + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview import models + return models + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview import models + return models + elif api_version == '2024-05-01': + from ..v2024_05_01 import models + return models + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview import models + return models + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview import models + return models + elif api_version == '2024-07-01': + from ..v2024_07_01 import models + return models + raise ValueError("API version {} is not available".format(api_version)) + + @property + def agent_pools(self): + """Instance depends on the API version: + + * 2019-02-01: :class:`AgentPoolsOperations` + * 2019-04-01: :class:`AgentPoolsOperations` + * 2019-06-01: :class:`AgentPoolsOperations` + * 2019-08-01: :class:`AgentPoolsOperations` + * 2019-10-01: :class:`AgentPoolsOperations` + * 2019-11-01: :class:`AgentPoolsOperations` + * 2020-01-01: :class:`AgentPoolsOperations` + * 2020-02-01: :class:`AgentPoolsOperations` + * 2020-03-01: :class:`AgentPoolsOperations` + * 2020-04-01: :class:`AgentPoolsOperations` + * 2020-06-01: :class:`AgentPoolsOperations` + * 2020-07-01: :class:`AgentPoolsOperations` + * 2020-09-01: :class:`AgentPoolsOperations` + * 2020-11-01: :class:`AgentPoolsOperations` + * 2020-12-01: :class:`AgentPoolsOperations` + * 2021-02-01: :class:`AgentPoolsOperations` + * 2021-03-01: :class:`AgentPoolsOperations` + * 2021-05-01: :class:`AgentPoolsOperations` + * 2021-07-01: :class:`AgentPoolsOperations` + * 2021-08-01: :class:`AgentPoolsOperations` + * 2021-09-01: :class:`AgentPoolsOperations` + * 2021-10-01: :class:`AgentPoolsOperations` + * 2021-11-01-preview: :class:`AgentPoolsOperations` + * 2022-01-01: :class:`AgentPoolsOperations` + * 2022-01-02-preview: :class:`AgentPoolsOperations` + * 
2022-02-01: :class:`AgentPoolsOperations` + * 2022-02-02-preview: :class:`AgentPoolsOperations` + * 2022-03-01: :class:`AgentPoolsOperations` + * 2022-03-02-preview: :class:`AgentPoolsOperations` + * 2022-04-01: :class:`AgentPoolsOperations` + * 2022-04-02-preview: :class:`AgentPoolsOperations` + * 2022-05-02-preview: :class:`AgentPoolsOperations` + * 2022-06-01: :class:`AgentPoolsOperations` + * 2022-06-02-preview: :class:`AgentPoolsOperations` + * 2022-07-01: :class:`AgentPoolsOperations` + * 2022-07-02-preview: :class:`AgentPoolsOperations` + * 2022-08-02-preview: :class:`AgentPoolsOperations` + * 2022-08-03-preview: :class:`AgentPoolsOperations` + * 2022-09-01: :class:`AgentPoolsOperations` + * 2022-09-02-preview: :class:`AgentPoolsOperations` + * 2022-10-02-preview: :class:`AgentPoolsOperations` + * 2022-11-01: :class:`AgentPoolsOperations` + * 2022-11-02-preview: :class:`AgentPoolsOperations` + * 2023-01-01: :class:`AgentPoolsOperations` + * 2023-01-02-preview: :class:`AgentPoolsOperations` + * 2023-02-01: :class:`AgentPoolsOperations` + * 2023-02-02-preview: :class:`AgentPoolsOperations` + * 2023-03-01: :class:`AgentPoolsOperations` + * 2023-03-02-preview: :class:`AgentPoolsOperations` + * 2023-04-01: :class:`AgentPoolsOperations` + * 2023-04-02-preview: :class:`AgentPoolsOperations` + * 2023-05-01: :class:`AgentPoolsOperations` + * 2023-05-02-preview: :class:`AgentPoolsOperations` + * 2023-06-01: :class:`AgentPoolsOperations` + * 2023-06-02-preview: :class:`AgentPoolsOperations` + * 2023-07-01: :class:`AgentPoolsOperations` + * 2023-07-02-preview: :class:`AgentPoolsOperations` + * 2023-08-01: :class:`AgentPoolsOperations` + * 2023-08-02-preview: :class:`AgentPoolsOperations` + * 2023-09-01: :class:`AgentPoolsOperations` + * 2023-09-02-preview: :class:`AgentPoolsOperations` + * 2023-10-01: :class:`AgentPoolsOperations` + * 2023-10-02-preview: :class:`AgentPoolsOperations` + * 2023-11-01: :class:`AgentPoolsOperations` + * 2023-11-02-preview: 
:class:`AgentPoolsOperations` + * 2024-01-01: :class:`AgentPoolsOperations` + * 2024-01-02-preview: :class:`AgentPoolsOperations` + * 2024-02-01: :class:`AgentPoolsOperations` + * 2024-02-02-preview: :class:`AgentPoolsOperations` + * 2024-03-02-preview: :class:`AgentPoolsOperations` + * 2024-04-02-preview: :class:`AgentPoolsOperations` + * 2024-05-01: :class:`AgentPoolsOperations` + * 2024-05-02-preview: :class:`AgentPoolsOperations` + * 2024-06-02-preview: :class:`AgentPoolsOperations` + * 2024-07-01: :class:`AgentPoolsOperations` + """ + api_version = self._get_api_version('agent_pools') + if api_version == '2019-02-01': + from ..v2019_02_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-04-01': + from ..v2019_04_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-06-01': + from ..v2019_06_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-08-01': + from ..v2019_08_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-10-01': + from ..v2019_10_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2019-11-01': + from ..v2019_11_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-01-01': + from ..v2020_01_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-02-01': + from ..v2020_02_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-03-01': + from ..v2020_03_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-04-01': + from ..v2020_04_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-06-01': + from ..v2020_06_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-07-01': + from ..v2020_07_01.aio.operations import AgentPoolsOperations as 
OperationClass + elif api_version == '2020-09-01': + from ..v2020_09_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-11-01': + from ..v2020_11_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2020-12-01': + from ..v2020_12_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-02-01': + from ..v2021_02_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-03-01': + from ..v2021_03_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-05-01': + from ..v2021_05_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-07-01': + from ..v2021_07_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-08-01': + from ..v2021_08_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-10-01': + from ..v2021_10_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-01-01': + from ..v2022_01_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-01-02-preview': + from ..v2022_01_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-02-01': + from ..v2022_02_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-02-02-preview': + from ..v2022_02_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-03-02-preview': + 
from ..v2022_03_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-04-01': + from ..v2022_04_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-06-01': + from ..v2022_06_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-09-01': + from ..v2022_09_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-01-01': + from ..v2023_01_01.aio.operations import AgentPoolsOperations as 
OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-02-01': + from ..v2023_02_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-03-01': + from ..v2023_03_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-04-01': + from ..v2023_04_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-06-01': + from ..v2023_06_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-07-01': + from ..v2023_07_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-08-01': + from ..v2023_08_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import 
AgentPoolsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-10-01': + from ..v2023_10_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-01-01': + from ..v2024_01_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import AgentPoolsOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import AgentPoolsOperations as OperationClass + else: + raise ValueError("API version {} 
does not have operation group 'agent_pools'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def container_services(self): + """Instance depends on the API version: + + * 2017-07-01: :class:`ContainerServicesOperations` + * 2019-04-01: :class:`ContainerServicesOperations` + """ + api_version = self._get_api_version('container_services') + if api_version == '2017-07-01': + from ..v2017_07_01.aio.operations import ContainerServicesOperations as OperationClass + elif api_version == '2019-04-01': + from ..v2019_04_01.aio.operations import ContainerServicesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'container_services'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def fleet_members(self): + """Instance depends on the API version: + + * 2022-06-02-preview: :class:`FleetMembersOperations` + * 2022-07-02-preview: :class:`FleetMembersOperations` + * 2022-09-02-preview: :class:`FleetMembersOperations` + """ + api_version = self._get_api_version('fleet_members') + if api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import FleetMembersOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import FleetMembersOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import FleetMembersOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'fleet_members'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, 
self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def fleets(self): + """Instance depends on the API version: + + * 2022-06-02-preview: :class:`FleetsOperations` + * 2022-07-02-preview: :class:`FleetsOperations` + * 2022-09-02-preview: :class:`FleetsOperations` + """ + api_version = self._get_api_version('fleets') + if api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import FleetsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import FleetsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import FleetsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'fleets'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def load_balancers(self): + """Instance depends on the API version: + + * 2024-03-02-preview: :class:`LoadBalancersOperations` + * 2024-04-02-preview: :class:`LoadBalancersOperations` + * 2024-05-02-preview: :class:`LoadBalancersOperations` + * 2024-06-02-preview: :class:`LoadBalancersOperations` + """ + api_version = self._get_api_version('load_balancers') + if api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import LoadBalancersOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import LoadBalancersOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import LoadBalancersOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import LoadBalancersOperations as 
OperationClass + else: + raise ValueError("API version {} does not have operation group 'load_balancers'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def machines(self): + """Instance depends on the API version: + + * 2023-07-02-preview: :class:`MachinesOperations` + * 2023-08-02-preview: :class:`MachinesOperations` + * 2023-09-02-preview: :class:`MachinesOperations` + * 2023-10-02-preview: :class:`MachinesOperations` + * 2023-11-02-preview: :class:`MachinesOperations` + * 2024-01-02-preview: :class:`MachinesOperations` + * 2024-02-02-preview: :class:`MachinesOperations` + * 2024-03-02-preview: :class:`MachinesOperations` + * 2024-04-02-preview: :class:`MachinesOperations` + * 2024-05-02-preview: :class:`MachinesOperations` + * 2024-06-02-preview: :class:`MachinesOperations` + * 2024-07-01: :class:`MachinesOperations` + """ + api_version = self._get_api_version('machines') + if api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import MachinesOperations as OperationClass + 
elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import MachinesOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import MachinesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'machines'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def maintenance_configurations(self): + """Instance depends on the API version: + + * 2020-12-01: :class:`MaintenanceConfigurationsOperations` + * 2021-02-01: :class:`MaintenanceConfigurationsOperations` + * 2021-03-01: :class:`MaintenanceConfigurationsOperations` + * 2021-05-01: :class:`MaintenanceConfigurationsOperations` + * 2021-07-01: :class:`MaintenanceConfigurationsOperations` + * 2021-08-01: :class:`MaintenanceConfigurationsOperations` + * 2021-09-01: :class:`MaintenanceConfigurationsOperations` + * 2021-10-01: :class:`MaintenanceConfigurationsOperations` + * 2021-11-01-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-01-01: :class:`MaintenanceConfigurationsOperations` + * 2022-01-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-02-01: :class:`MaintenanceConfigurationsOperations` + * 2022-02-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-03-01: :class:`MaintenanceConfigurationsOperations` + * 2022-03-02-preview: :class:`MaintenanceConfigurationsOperations` + * 
2022-04-01: :class:`MaintenanceConfigurationsOperations` + * 2022-04-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-05-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-06-01: :class:`MaintenanceConfigurationsOperations` + * 2022-06-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-07-01: :class:`MaintenanceConfigurationsOperations` + * 2022-07-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-08-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-08-03-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-09-01: :class:`MaintenanceConfigurationsOperations` + * 2022-09-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-10-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2022-11-01: :class:`MaintenanceConfigurationsOperations` + * 2022-11-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-01-01: :class:`MaintenanceConfigurationsOperations` + * 2023-01-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-02-01: :class:`MaintenanceConfigurationsOperations` + * 2023-02-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-03-01: :class:`MaintenanceConfigurationsOperations` + * 2023-03-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-04-01: :class:`MaintenanceConfigurationsOperations` + * 2023-04-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-05-01: :class:`MaintenanceConfigurationsOperations` + * 2023-05-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-06-01: :class:`MaintenanceConfigurationsOperations` + * 2023-06-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-07-01: :class:`MaintenanceConfigurationsOperations` + * 2023-07-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-08-01: :class:`MaintenanceConfigurationsOperations` + * 2023-08-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-09-01: 
:class:`MaintenanceConfigurationsOperations` + * 2023-09-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-10-01: :class:`MaintenanceConfigurationsOperations` + * 2023-10-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2023-11-01: :class:`MaintenanceConfigurationsOperations` + * 2023-11-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-01-01: :class:`MaintenanceConfigurationsOperations` + * 2024-01-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-02-01: :class:`MaintenanceConfigurationsOperations` + * 2024-02-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-03-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-04-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-05-01: :class:`MaintenanceConfigurationsOperations` + * 2024-05-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-06-02-preview: :class:`MaintenanceConfigurationsOperations` + * 2024-07-01: :class:`MaintenanceConfigurationsOperations` + """ + api_version = self._get_api_version('maintenance_configurations') + if api_version == '2020-12-01': + from ..v2020_12_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-02-01': + from ..v2021_02_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-03-01': + from ..v2021_03_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-05-01': + from ..v2021_05_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-07-01': + from ..v2021_07_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-08-01': + from ..v2021_08_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import 
MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-10-01': + from ..v2021_10_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-01-01': + from ..v2022_01_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-01-02-preview': + from ..v2022_01_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-02-01': + from ..v2022_02_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-02-02-preview': + from ..v2022_02_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-03-02-preview': + from ..v2022_03_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-04-01': + from ..v2022_04_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-06-01': + from ..v2022_06_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import 
MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-09-01': + from ..v2022_09_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-01-01': + from ..v2023_01_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-02-01': + from ..v2023_02_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-03-01': + from ..v2023_03_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations 
import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-04-01': + from ..v2023_04_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-06-01': + from ..v2023_06_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-07-01': + from ..v2023_07_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-08-01': + from ..v2023_08_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-10-01': + from ..v2023_10_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import 
MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-01-01': + from ..v2024_01_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'maintenance_configurations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def managed_cluster_snapshots(self): + """Instance depends on the API version: + + * 2022-02-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-03-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-04-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-05-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-06-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-07-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-08-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-08-03-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-09-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-10-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2022-11-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-01-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-02-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-03-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-04-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-05-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-06-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-07-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-08-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-09-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-10-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2023-11-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-01-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-02-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-03-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-04-02-preview: :class:`ManagedClusterSnapshotsOperations` + * 2024-05-02-preview: 
:class:`ManagedClusterSnapshotsOperations` + * 2024-06-02-preview: :class:`ManagedClusterSnapshotsOperations` + """ + api_version = self._get_api_version('managed_cluster_snapshots') + if api_version == '2022-02-02-preview': + from ..v2022_02_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-03-02-preview': + from ..v2022_03_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from 
..v2023_02_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import ManagedClusterSnapshotsOperations as 
OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'managed_cluster_snapshots'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def managed_clusters(self): + """Instance depends on the API version: + + * 2018-03-31: :class:`ManagedClustersOperations` + * 2018-08-01-preview: :class:`ManagedClustersOperations` + * 2019-02-01: :class:`ManagedClustersOperations` + * 2019-04-01: :class:`ManagedClustersOperations` + * 2019-06-01: :class:`ManagedClustersOperations` + * 2019-08-01: :class:`ManagedClustersOperations` + * 2019-10-01: :class:`ManagedClustersOperations` + * 2019-11-01: :class:`ManagedClustersOperations` + * 2020-01-01: :class:`ManagedClustersOperations` + * 2020-02-01: :class:`ManagedClustersOperations` + * 2020-03-01: :class:`ManagedClustersOperations` + * 2020-04-01: :class:`ManagedClustersOperations` + * 2020-06-01: :class:`ManagedClustersOperations` + * 2020-07-01: :class:`ManagedClustersOperations` + * 2020-09-01: :class:`ManagedClustersOperations` + * 2020-11-01: :class:`ManagedClustersOperations` + * 2020-12-01: :class:`ManagedClustersOperations` + * 2021-02-01: :class:`ManagedClustersOperations` + * 2021-03-01: :class:`ManagedClustersOperations` + * 2021-05-01: :class:`ManagedClustersOperations` + * 2021-07-01: :class:`ManagedClustersOperations` + * 2021-08-01: :class:`ManagedClustersOperations` + * 2021-09-01: :class:`ManagedClustersOperations` + * 2021-10-01: :class:`ManagedClustersOperations` + * 2021-11-01-preview: 
:class:`ManagedClustersOperations` + * 2022-01-01: :class:`ManagedClustersOperations` + * 2022-01-02-preview: :class:`ManagedClustersOperations` + * 2022-02-01: :class:`ManagedClustersOperations` + * 2022-02-02-preview: :class:`ManagedClustersOperations` + * 2022-03-01: :class:`ManagedClustersOperations` + * 2022-03-02-preview: :class:`ManagedClustersOperations` + * 2022-04-01: :class:`ManagedClustersOperations` + * 2022-04-02-preview: :class:`ManagedClustersOperations` + * 2022-05-02-preview: :class:`ManagedClustersOperations` + * 2022-06-01: :class:`ManagedClustersOperations` + * 2022-06-02-preview: :class:`ManagedClustersOperations` + * 2022-07-01: :class:`ManagedClustersOperations` + * 2022-07-02-preview: :class:`ManagedClustersOperations` + * 2022-08-02-preview: :class:`ManagedClustersOperations` + * 2022-08-03-preview: :class:`ManagedClustersOperations` + * 2022-09-01: :class:`ManagedClustersOperations` + * 2022-09-02-preview: :class:`ManagedClustersOperations` + * 2022-10-02-preview: :class:`ManagedClustersOperations` + * 2022-11-01: :class:`ManagedClustersOperations` + * 2022-11-02-preview: :class:`ManagedClustersOperations` + * 2023-01-01: :class:`ManagedClustersOperations` + * 2023-01-02-preview: :class:`ManagedClustersOperations` + * 2023-02-01: :class:`ManagedClustersOperations` + * 2023-02-02-preview: :class:`ManagedClustersOperations` + * 2023-03-01: :class:`ManagedClustersOperations` + * 2023-03-02-preview: :class:`ManagedClustersOperations` + * 2023-04-01: :class:`ManagedClustersOperations` + * 2023-04-02-preview: :class:`ManagedClustersOperations` + * 2023-05-01: :class:`ManagedClustersOperations` + * 2023-05-02-preview: :class:`ManagedClustersOperations` + * 2023-06-01: :class:`ManagedClustersOperations` + * 2023-06-02-preview: :class:`ManagedClustersOperations` + * 2023-07-01: :class:`ManagedClustersOperations` + * 2023-07-02-preview: :class:`ManagedClustersOperations` + * 2023-08-01: :class:`ManagedClustersOperations` + * 2023-08-02-preview: 
:class:`ManagedClustersOperations` + * 2023-09-01: :class:`ManagedClustersOperations` + * 2023-09-02-preview: :class:`ManagedClustersOperations` + * 2023-10-01: :class:`ManagedClustersOperations` + * 2023-10-02-preview: :class:`ManagedClustersOperations` + * 2023-11-01: :class:`ManagedClustersOperations` + * 2023-11-02-preview: :class:`ManagedClustersOperations` + * 2024-01-01: :class:`ManagedClustersOperations` + * 2024-01-02-preview: :class:`ManagedClustersOperations` + * 2024-02-01: :class:`ManagedClustersOperations` + * 2024-02-02-preview: :class:`ManagedClustersOperations` + * 2024-03-02-preview: :class:`ManagedClustersOperations` + * 2024-04-02-preview: :class:`ManagedClustersOperations` + * 2024-05-01: :class:`ManagedClustersOperations` + * 2024-05-02-preview: :class:`ManagedClustersOperations` + * 2024-06-02-preview: :class:`ManagedClustersOperations` + * 2024-07-01: :class:`ManagedClustersOperations` + """ + api_version = self._get_api_version('managed_clusters') + if api_version == '2018-03-31': + from ..v2018_03_31.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2018-08-01-preview': + from ..v2018_08_01_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-02-01': + from ..v2019_02_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-04-01': + from ..v2019_04_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-06-01': + from ..v2019_06_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-08-01': + from ..v2019_08_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-10-01': + from ..v2019_10_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2019-11-01': + from ..v2019_11_01.aio.operations import ManagedClustersOperations as OperationClass + elif 
api_version == '2020-01-01': + from ..v2020_01_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-02-01': + from ..v2020_02_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-03-01': + from ..v2020_03_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-04-01': + from ..v2020_04_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-06-01': + from ..v2020_06_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-07-01': + from ..v2020_07_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-09-01': + from ..v2020_09_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-11-01': + from ..v2020_11_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2020-12-01': + from ..v2020_12_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-02-01': + from ..v2021_02_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-03-01': + from ..v2021_03_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-05-01': + from ..v2021_05_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-07-01': + from ..v2021_07_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-08-01': + from ..v2021_08_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2021-10-01': + from ..v2021_10_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == 
'2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-01-01': + from ..v2022_01_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-01-02-preview': + from ..v2022_01_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-02-01': + from ..v2022_02_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-02-02-preview': + from ..v2022_02_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-03-02-preview': + from ..v2022_03_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-04-01': + from ..v2022_04_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-06-01': + from ..v2022_06_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == 
'2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-09-01': + from ..v2022_09_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-01-01': + from ..v2023_01_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-02-01': + from ..v2023_02_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-03-01': + from ..v2023_03_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-04-01': + from ..v2023_04_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-05-02-preview': + 
from ..v2023_05_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-06-01': + from ..v2023_06_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-07-01': + from ..v2023_07_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-08-01': + from ..v2023_08_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-10-01': + from ..v2023_10_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-01-01': + from ..v2024_01_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations 
import ManagedClustersOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import ManagedClustersOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import ManagedClustersOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'managed_clusters'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def open_shift_managed_clusters(self): + """Instance depends on the API version: + + * 2018-09-30-preview: :class:`OpenShiftManagedClustersOperations` + * 2019-04-30: :class:`OpenShiftManagedClustersOperations` + * 2019-09-30-preview: :class:`OpenShiftManagedClustersOperations` + * 2019-10-27-preview: :class:`OpenShiftManagedClustersOperations` + """ + api_version = self._get_api_version('open_shift_managed_clusters') + if api_version == '2018-09-30-preview': + from ..v2018_09_30_preview.aio.operations import OpenShiftManagedClustersOperations as OperationClass + elif api_version == '2019-04-30': + from ..v2019_04_30.aio.operations import OpenShiftManagedClustersOperations 
as OperationClass + elif api_version == '2019-09-30-preview': + from ..v2019_09_30_preview.aio.operations import OpenShiftManagedClustersOperations as OperationClass + elif api_version == '2019-10-27-preview': + from ..v2019_10_27_preview.aio.operations import OpenShiftManagedClustersOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'open_shift_managed_clusters'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def operation_status_result(self): + """Instance depends on the API version: + + * 2023-10-02-preview: :class:`OperationStatusResultOperations` + * 2023-11-02-preview: :class:`OperationStatusResultOperations` + * 2024-01-02-preview: :class:`OperationStatusResultOperations` + * 2024-02-02-preview: :class:`OperationStatusResultOperations` + * 2024-03-02-preview: :class:`OperationStatusResultOperations` + * 2024-04-02-preview: :class:`OperationStatusResultOperations` + * 2024-05-02-preview: :class:`OperationStatusResultOperations` + * 2024-06-02-preview: :class:`OperationStatusResultOperations` + """ + api_version = self._get_api_version('operation_status_result') + if api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import 
OperationStatusResultOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import OperationStatusResultOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import OperationStatusResultOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'operation_status_result'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def operations(self): + """Instance depends on the API version: + + * 2018-03-31: :class:`Operations` + * 2018-08-01-preview: :class:`Operations` + * 2019-02-01: :class:`Operations` + * 2019-04-01: :class:`Operations` + * 2019-06-01: :class:`Operations` + * 2019-08-01: :class:`Operations` + * 2019-10-01: :class:`Operations` + * 2019-11-01: :class:`Operations` + * 2020-01-01: :class:`Operations` + * 2020-02-01: :class:`Operations` + * 2020-03-01: :class:`Operations` + * 2020-04-01: :class:`Operations` + * 2020-06-01: :class:`Operations` + * 2020-07-01: :class:`Operations` + * 2020-09-01: :class:`Operations` + * 2020-11-01: :class:`Operations` + * 2020-12-01: :class:`Operations` + * 2021-02-01: :class:`Operations` + * 2021-03-01: :class:`Operations` + * 2021-05-01: :class:`Operations` + * 2021-07-01: :class:`Operations` + * 2021-08-01: :class:`Operations` + * 2021-09-01: :class:`Operations` + * 2021-10-01: :class:`Operations` + * 2021-11-01-preview: :class:`Operations` + * 2022-01-01: :class:`Operations` + * 2022-01-02-preview: :class:`Operations` + * 2022-02-01: :class:`Operations` + * 2022-02-02-preview: :class:`Operations` + * 2022-03-01: :class:`Operations` + * 
2022-03-02-preview: :class:`Operations` + * 2022-04-01: :class:`Operations` + * 2022-04-02-preview: :class:`Operations` + * 2022-05-02-preview: :class:`Operations` + * 2022-06-01: :class:`Operations` + * 2022-06-02-preview: :class:`Operations` + * 2022-07-01: :class:`Operations` + * 2022-07-02-preview: :class:`Operations` + * 2022-08-02-preview: :class:`Operations` + * 2022-08-03-preview: :class:`Operations` + * 2022-09-01: :class:`Operations` + * 2022-09-02-preview: :class:`Operations` + * 2022-10-02-preview: :class:`Operations` + * 2022-11-01: :class:`Operations` + * 2022-11-02-preview: :class:`Operations` + * 2023-01-01: :class:`Operations` + * 2023-01-02-preview: :class:`Operations` + * 2023-02-01: :class:`Operations` + * 2023-02-02-preview: :class:`Operations` + * 2023-03-01: :class:`Operations` + * 2023-03-02-preview: :class:`Operations` + * 2023-04-01: :class:`Operations` + * 2023-04-02-preview: :class:`Operations` + * 2023-05-01: :class:`Operations` + * 2023-05-02-preview: :class:`Operations` + * 2023-06-01: :class:`Operations` + * 2023-06-02-preview: :class:`Operations` + * 2023-07-01: :class:`Operations` + * 2023-07-02-preview: :class:`Operations` + * 2023-08-01: :class:`Operations` + * 2023-08-02-preview: :class:`Operations` + * 2023-09-01: :class:`Operations` + * 2023-09-02-preview: :class:`Operations` + * 2023-10-01: :class:`Operations` + * 2023-10-02-preview: :class:`Operations` + * 2023-11-01: :class:`Operations` + * 2023-11-02-preview: :class:`Operations` + * 2024-01-01: :class:`Operations` + * 2024-01-02-preview: :class:`Operations` + * 2024-02-01: :class:`Operations` + * 2024-02-02-preview: :class:`Operations` + * 2024-03-02-preview: :class:`Operations` + * 2024-04-02-preview: :class:`Operations` + * 2024-05-01: :class:`Operations` + * 2024-05-02-preview: :class:`Operations` + * 2024-06-02-preview: :class:`Operations` + * 2024-07-01: :class:`Operations` + """ + api_version = self._get_api_version('operations') + if api_version == '2018-03-31': + 
from ..v2018_03_31.aio.operations import Operations as OperationClass + elif api_version == '2018-08-01-preview': + from ..v2018_08_01_preview.aio.operations import Operations as OperationClass + elif api_version == '2019-02-01': + from ..v2019_02_01.aio.operations import Operations as OperationClass + elif api_version == '2019-04-01': + from ..v2019_04_01.aio.operations import Operations as OperationClass + elif api_version == '2019-06-01': + from ..v2019_06_01.aio.operations import Operations as OperationClass + elif api_version == '2019-08-01': + from ..v2019_08_01.aio.operations import Operations as OperationClass + elif api_version == '2019-10-01': + from ..v2019_10_01.aio.operations import Operations as OperationClass + elif api_version == '2019-11-01': + from ..v2019_11_01.aio.operations import Operations as OperationClass + elif api_version == '2020-01-01': + from ..v2020_01_01.aio.operations import Operations as OperationClass + elif api_version == '2020-02-01': + from ..v2020_02_01.aio.operations import Operations as OperationClass + elif api_version == '2020-03-01': + from ..v2020_03_01.aio.operations import Operations as OperationClass + elif api_version == '2020-04-01': + from ..v2020_04_01.aio.operations import Operations as OperationClass + elif api_version == '2020-06-01': + from ..v2020_06_01.aio.operations import Operations as OperationClass + elif api_version == '2020-07-01': + from ..v2020_07_01.aio.operations import Operations as OperationClass + elif api_version == '2020-09-01': + from ..v2020_09_01.aio.operations import Operations as OperationClass + elif api_version == '2020-11-01': + from ..v2020_11_01.aio.operations import Operations as OperationClass + elif api_version == '2020-12-01': + from ..v2020_12_01.aio.operations import Operations as OperationClass + elif api_version == '2021-02-01': + from ..v2021_02_01.aio.operations import Operations as OperationClass + elif api_version == '2021-03-01': + from ..v2021_03_01.aio.operations 
import Operations as OperationClass + elif api_version == '2021-05-01': + from ..v2021_05_01.aio.operations import Operations as OperationClass + elif api_version == '2021-07-01': + from ..v2021_07_01.aio.operations import Operations as OperationClass + elif api_version == '2021-08-01': + from ..v2021_08_01.aio.operations import Operations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import Operations as OperationClass + elif api_version == '2021-10-01': + from ..v2021_10_01.aio.operations import Operations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-01-01': + from ..v2022_01_01.aio.operations import Operations as OperationClass + elif api_version == '2022-01-02-preview': + from ..v2022_01_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-02-01': + from ..v2022_02_01.aio.operations import Operations as OperationClass + elif api_version == '2022-02-02-preview': + from ..v2022_02_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import Operations as OperationClass + elif api_version == '2022-03-02-preview': + from ..v2022_03_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-04-01': + from ..v2022_04_01.aio.operations import Operations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-06-01': + from ..v2022_06_01.aio.operations import Operations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import Operations as OperationClass + elif 
api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import Operations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-09-01': + from ..v2022_09_01.aio.operations import Operations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import Operations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-01-01': + from ..v2023_01_01.aio.operations import Operations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-02-01': + from ..v2023_02_01.aio.operations import Operations as OperationClass + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-03-01': + from ..v2023_03_01.aio.operations import Operations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-04-01': + from ..v2023_04_01.aio.operations import Operations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import Operations as OperationClass + elif 
api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import Operations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-06-01': + from ..v2023_06_01.aio.operations import Operations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-07-01': + from ..v2023_07_01.aio.operations import Operations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-08-01': + from ..v2023_08_01.aio.operations import Operations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import Operations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-10-01': + from ..v2023_10_01.aio.operations import Operations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import Operations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2024-01-01': + from ..v2024_01_01.aio.operations import Operations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations import Operations as OperationClass + elif api_version == 
'2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import Operations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import Operations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import Operations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'operations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_endpoint_connections(self): + """Instance depends on the API version: + + * 2020-06-01: :class:`PrivateEndpointConnectionsOperations` + * 2020-07-01: :class:`PrivateEndpointConnectionsOperations` + * 2020-09-01: :class:`PrivateEndpointConnectionsOperations` + * 2020-11-01: :class:`PrivateEndpointConnectionsOperations` + * 2020-12-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-02-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-03-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-05-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-07-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-08-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-09-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-10-01: :class:`PrivateEndpointConnectionsOperations` + * 2021-11-01-preview: 
:class:`PrivateEndpointConnectionsOperations` + * 2022-01-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-01-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-02-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-02-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-03-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-03-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-04-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-04-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-05-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-06-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-06-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-07-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-07-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-08-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-08-03-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-09-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-09-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-10-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2022-11-01: :class:`PrivateEndpointConnectionsOperations` + * 2022-11-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-01-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-01-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-02-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-02-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-03-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-03-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-04-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-04-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-05-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-05-02-preview: 
:class:`PrivateEndpointConnectionsOperations` + * 2023-06-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-06-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-07-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-07-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-08-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-08-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-09-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-09-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-10-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-10-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2023-11-01: :class:`PrivateEndpointConnectionsOperations` + * 2023-11-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-01-01: :class:`PrivateEndpointConnectionsOperations` + * 2024-01-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-02-01: :class:`PrivateEndpointConnectionsOperations` + * 2024-02-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-03-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-04-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-05-01: :class:`PrivateEndpointConnectionsOperations` + * 2024-05-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-06-02-preview: :class:`PrivateEndpointConnectionsOperations` + * 2024-07-01: :class:`PrivateEndpointConnectionsOperations` + """ + api_version = self._get_api_version('private_endpoint_connections') + if api_version == '2020-06-01': + from ..v2020_06_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2020-07-01': + from ..v2020_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2020-09-01': + from ..v2020_09_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif 
api_version == '2020-11-01': + from ..v2020_11_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2020-12-01': + from ..v2020_12_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-02-01': + from ..v2021_02_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-03-01': + from ..v2021_03_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-05-01': + from ..v2021_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-07-01': + from ..v2021_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-08-01': + from ..v2021_08_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-10-01': + from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-01-01': + from ..v2022_01_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-01-02-preview': + from ..v2022_01_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-02-01': + from ..v2022_02_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-02-02-preview': + from ..v2022_02_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import 
PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-03-02-preview': + from ..v2022_03_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-04-01': + from ..v2022_04_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-06-01': + from ..v2022_06_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-09-01': + from ..v2022_09_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-11-01': + from 
..v2022_11_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-01-01': + from ..v2023_01_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-02-01': + from ..v2023_02_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-03-01': + from ..v2023_03_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-04-01': + from ..v2023_04_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-06-01': + from ..v2023_06_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-07-01': + from 
..v2023_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-08-01': + from ..v2023_08_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-10-01': + from ..v2023_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-01-01': + from ..v2024_01_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from 
..v2024_03_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_link_resources(self): + """Instance depends on the API version: + + * 2020-09-01: :class:`PrivateLinkResourcesOperations` + * 2020-11-01: :class:`PrivateLinkResourcesOperations` + * 2020-12-01: :class:`PrivateLinkResourcesOperations` + * 2021-02-01: :class:`PrivateLinkResourcesOperations` + * 2021-03-01: :class:`PrivateLinkResourcesOperations` + * 2021-05-01: :class:`PrivateLinkResourcesOperations` + * 2021-07-01: :class:`PrivateLinkResourcesOperations` + * 2021-08-01: :class:`PrivateLinkResourcesOperations` + * 2021-09-01: :class:`PrivateLinkResourcesOperations` + * 2021-10-01: :class:`PrivateLinkResourcesOperations` + * 2021-11-01-preview: :class:`PrivateLinkResourcesOperations` + * 2022-01-01: :class:`PrivateLinkResourcesOperations` + * 2022-01-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-02-01: 
:class:`PrivateLinkResourcesOperations` + * 2022-02-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-03-01: :class:`PrivateLinkResourcesOperations` + * 2022-03-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-04-01: :class:`PrivateLinkResourcesOperations` + * 2022-04-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-05-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-06-01: :class:`PrivateLinkResourcesOperations` + * 2022-06-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-07-01: :class:`PrivateLinkResourcesOperations` + * 2022-07-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-08-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-08-03-preview: :class:`PrivateLinkResourcesOperations` + * 2022-09-01: :class:`PrivateLinkResourcesOperations` + * 2022-09-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-10-02-preview: :class:`PrivateLinkResourcesOperations` + * 2022-11-01: :class:`PrivateLinkResourcesOperations` + * 2022-11-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-01-01: :class:`PrivateLinkResourcesOperations` + * 2023-01-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-02-01: :class:`PrivateLinkResourcesOperations` + * 2023-02-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-03-01: :class:`PrivateLinkResourcesOperations` + * 2023-03-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-04-01: :class:`PrivateLinkResourcesOperations` + * 2023-04-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-05-01: :class:`PrivateLinkResourcesOperations` + * 2023-05-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-06-01: :class:`PrivateLinkResourcesOperations` + * 2023-06-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-07-01: :class:`PrivateLinkResourcesOperations` + * 2023-07-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-08-01: :class:`PrivateLinkResourcesOperations` + * 2023-08-02-preview: 
:class:`PrivateLinkResourcesOperations` + * 2023-09-01: :class:`PrivateLinkResourcesOperations` + * 2023-09-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-10-01: :class:`PrivateLinkResourcesOperations` + * 2023-10-02-preview: :class:`PrivateLinkResourcesOperations` + * 2023-11-01: :class:`PrivateLinkResourcesOperations` + * 2023-11-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-01-01: :class:`PrivateLinkResourcesOperations` + * 2024-01-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-02-01: :class:`PrivateLinkResourcesOperations` + * 2024-02-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-03-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-04-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-05-01: :class:`PrivateLinkResourcesOperations` + * 2024-05-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-06-02-preview: :class:`PrivateLinkResourcesOperations` + * 2024-07-01: :class:`PrivateLinkResourcesOperations` + """ + api_version = self._get_api_version('private_link_resources') + if api_version == '2020-09-01': + from ..v2020_09_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2020-11-01': + from ..v2020_11_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2020-12-01': + from ..v2020_12_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-02-01': + from ..v2021_02_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-03-01': + from ..v2021_03_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-05-01': + from ..v2021_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-07-01': + from ..v2021_07_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == 
'2021-08-01': + from ..v2021_08_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-10-01': + from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-01-01': + from ..v2022_01_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-01-02-preview': + from ..v2022_01_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-02-01': + from ..v2022_02_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-02-02-preview': + from ..v2022_02_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-03-02-preview': + from ..v2022_03_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-04-01': + from ..v2022_04_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-06-01': + from ..v2022_06_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import PrivateLinkResourcesOperations as 
OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-09-01': + from ..v2022_09_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-01-01': + from ..v2023_01_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-02-01': + from ..v2023_02_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-03-01': + from ..v2023_03_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-03-02-preview': + from 
..v2023_03_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-04-01': + from ..v2023_04_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-06-01': + from ..v2023_06_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-07-01': + from ..v2023_07_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-08-01': + from ..v2023_08_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-10-01': + from ..v2023_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + 
elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-01-01': + from ..v2024_01_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import PrivateLinkResourcesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def 
resolve_private_link_service_id(self): + """Instance depends on the API version: + + * 2020-09-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2020-11-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2020-12-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-02-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-03-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-05-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-07-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-08-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-09-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-10-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2021-11-01-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-01-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-02-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-03-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-04-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-06-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-07-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-07-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-08-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-08-03-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-09-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-09-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-10-02-preview: 
:class:`ResolvePrivateLinkServiceIdOperations` + * 2022-11-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2022-11-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-01-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-02-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-03-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-04-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-05-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-06-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-07-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-07-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-08-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-08-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-09-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-09-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-10-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-10-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-11-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2023-11-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-01-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-02-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 
2024-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-05-01: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` + * 2024-07-01: :class:`ResolvePrivateLinkServiceIdOperations` + """ + api_version = self._get_api_version('resolve_private_link_service_id') + if api_version == '2020-09-01': + from ..v2020_09_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2020-11-01': + from ..v2020_11_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2020-12-01': + from ..v2020_12_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-02-01': + from ..v2021_02_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-03-01': + from ..v2021_03_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-05-01': + from ..v2021_05_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-07-01': + from ..v2021_07_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-08-01': + from ..v2021_08_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-10-01': + from ..v2021_10_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-01-01': + from ..v2022_01_01.aio.operations import 
ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-01-02-preview': + from ..v2022_01_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-02-01': + from ..v2022_02_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-02-02-preview': + from ..v2022_02_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-03-02-preview': + from ..v2022_03_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-04-01': + from ..v2022_04_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-06-01': + from ..v2022_06_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-08-03-preview': + from 
..v2022_08_03_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-09-01': + from ..v2022_09_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-01-01': + from ..v2023_01_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-02-01': + from ..v2023_02_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-03-01': + from ..v2023_03_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-04-01': + from ..v2023_04_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == 
'2023-05-01': + from ..v2023_05_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-06-01': + from ..v2023_06_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-07-01': + from ..v2023_07_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-08-01': + from ..v2023_08_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-10-01': + from ..v2023_10_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version 
== '2024-01-01': + from ..v2024_01_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'resolve_private_link_service_id'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def snapshots(self): + """Instance depends on the API version: + + * 2021-08-01: :class:`SnapshotsOperations` + * 2021-09-01: :class:`SnapshotsOperations` + * 2021-10-01: :class:`SnapshotsOperations` + * 2021-11-01-preview: 
:class:`SnapshotsOperations` + * 2022-01-01: :class:`SnapshotsOperations` + * 2022-01-02-preview: :class:`SnapshotsOperations` + * 2022-02-01: :class:`SnapshotsOperations` + * 2022-02-02-preview: :class:`SnapshotsOperations` + * 2022-03-01: :class:`SnapshotsOperations` + * 2022-03-02-preview: :class:`SnapshotsOperations` + * 2022-04-01: :class:`SnapshotsOperations` + * 2022-04-02-preview: :class:`SnapshotsOperations` + * 2022-05-02-preview: :class:`SnapshotsOperations` + * 2022-06-01: :class:`SnapshotsOperations` + * 2022-06-02-preview: :class:`SnapshotsOperations` + * 2022-07-01: :class:`SnapshotsOperations` + * 2022-07-02-preview: :class:`SnapshotsOperations` + * 2022-08-02-preview: :class:`SnapshotsOperations` + * 2022-08-03-preview: :class:`SnapshotsOperations` + * 2022-09-01: :class:`SnapshotsOperations` + * 2022-09-02-preview: :class:`SnapshotsOperations` + * 2022-10-02-preview: :class:`SnapshotsOperations` + * 2022-11-01: :class:`SnapshotsOperations` + * 2022-11-02-preview: :class:`SnapshotsOperations` + * 2023-01-01: :class:`SnapshotsOperations` + * 2023-01-02-preview: :class:`SnapshotsOperations` + * 2023-02-01: :class:`SnapshotsOperations` + * 2023-02-02-preview: :class:`SnapshotsOperations` + * 2023-03-01: :class:`SnapshotsOperations` + * 2023-03-02-preview: :class:`SnapshotsOperations` + * 2023-04-01: :class:`SnapshotsOperations` + * 2023-04-02-preview: :class:`SnapshotsOperations` + * 2023-05-01: :class:`SnapshotsOperations` + * 2023-05-02-preview: :class:`SnapshotsOperations` + * 2023-06-01: :class:`SnapshotsOperations` + * 2023-06-02-preview: :class:`SnapshotsOperations` + * 2023-07-01: :class:`SnapshotsOperations` + * 2023-07-02-preview: :class:`SnapshotsOperations` + * 2023-08-01: :class:`SnapshotsOperations` + * 2023-08-02-preview: :class:`SnapshotsOperations` + * 2023-09-01: :class:`SnapshotsOperations` + * 2023-09-02-preview: :class:`SnapshotsOperations` + * 2023-10-01: :class:`SnapshotsOperations` + * 2023-10-02-preview: 
:class:`SnapshotsOperations` + * 2023-11-01: :class:`SnapshotsOperations` + * 2023-11-02-preview: :class:`SnapshotsOperations` + * 2024-01-01: :class:`SnapshotsOperations` + * 2024-01-02-preview: :class:`SnapshotsOperations` + * 2024-02-01: :class:`SnapshotsOperations` + * 2024-02-02-preview: :class:`SnapshotsOperations` + * 2024-03-02-preview: :class:`SnapshotsOperations` + * 2024-04-02-preview: :class:`SnapshotsOperations` + * 2024-05-01: :class:`SnapshotsOperations` + * 2024-05-02-preview: :class:`SnapshotsOperations` + * 2024-06-02-preview: :class:`SnapshotsOperations` + * 2024-07-01: :class:`SnapshotsOperations` + """ + api_version = self._get_api_version('snapshots') + if api_version == '2021-08-01': + from ..v2021_08_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2021-10-01': + from ..v2021_10_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-01-01': + from ..v2022_01_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-01-02-preview': + from ..v2022_01_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-02-01': + from ..v2022_02_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-02-02-preview': + from ..v2022_02_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-03-02-preview': + from ..v2022_03_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-04-01': + from ..v2022_04_01.aio.operations import 
SnapshotsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-06-01': + from ..v2022_06_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-09-01': + from ..v2022_09_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-01-01': + from ..v2023_01_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-02-01': + from 
..v2023_02_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-03-01': + from ..v2023_03_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-04-01': + from ..v2023_04_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-06-01': + from ..v2023_06_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-07-01': + from ..v2023_07_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-08-01': + from ..v2023_08_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-10-01': + from 
..v2023_10_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-01-01': + from ..v2024_01_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import SnapshotsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'snapshots'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version)), api_version) + + @property + def trusted_access_role_bindings(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-07-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-08-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-08-03-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-09-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-10-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2022-11-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-01-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-02-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-03-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-07-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-08-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-09-01: :class:`TrustedAccessRoleBindingsOperations` + * 2023-09-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-10-01: :class:`TrustedAccessRoleBindingsOperations` + * 2023-10-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2023-11-01: :class:`TrustedAccessRoleBindingsOperations` + * 2023-11-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-01-01: :class:`TrustedAccessRoleBindingsOperations` + * 2024-01-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-02-01: :class:`TrustedAccessRoleBindingsOperations` + * 2024-02-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 
2024-03-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-05-01: :class:`TrustedAccessRoleBindingsOperations` + * 2024-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` + * 2024-07-01: :class:`TrustedAccessRoleBindingsOperations` + """ + api_version = self._get_api_version('trusted_access_role_bindings') + if api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-05-02-preview': + from ..v2022_05_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == 
'2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-07-02-preview': + from ..v2023_07_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-10-01': + from ..v2023_10_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-01-01': + from ..v2024_01_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass 
+ elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'trusted_access_role_bindings'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def trusted_access_roles(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-05-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-06-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-07-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-08-02-preview: 
:class:`TrustedAccessRolesOperations` + * 2022-08-03-preview: :class:`TrustedAccessRolesOperations` + * 2022-09-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-10-02-preview: :class:`TrustedAccessRolesOperations` + * 2022-11-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-01-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-02-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-03-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-04-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-05-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-06-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-07-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-08-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-09-01: :class:`TrustedAccessRolesOperations` + * 2023-09-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-10-01: :class:`TrustedAccessRolesOperations` + * 2023-10-02-preview: :class:`TrustedAccessRolesOperations` + * 2023-11-01: :class:`TrustedAccessRolesOperations` + * 2023-11-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-01-01: :class:`TrustedAccessRolesOperations` + * 2024-01-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-02-01: :class:`TrustedAccessRolesOperations` + * 2024-02-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-03-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-04-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-05-01: :class:`TrustedAccessRolesOperations` + * 2024-05-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-06-02-preview: :class:`TrustedAccessRolesOperations` + * 2024-07-01: :class:`TrustedAccessRolesOperations` + """ + api_version = self._get_api_version('trusted_access_roles') + if api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-05-02-preview': + from 
..v2022_05_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-06-02-preview': + from ..v2022_06_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-07-02-preview': + from ..v2022_07_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-08-02-preview': + from ..v2022_08_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-08-03-preview': + from ..v2022_08_03_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-09-02-preview': + from ..v2022_09_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-10-02-preview': + from ..v2022_10_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2022-11-02-preview': + from ..v2022_11_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-01-02-preview': + from ..v2023_01_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-02-02-preview': + from ..v2023_02_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-03-02-preview': + from ..v2023_03_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-04-02-preview': + from ..v2023_04_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-05-02-preview': + from ..v2023_05_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-06-02-preview': + from ..v2023_06_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-07-02-preview': + from 
..v2023_07_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-08-02-preview': + from ..v2023_08_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-09-01': + from ..v2023_09_01.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-09-02-preview': + from ..v2023_09_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-10-01': + from ..v2023_10_01.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-10-02-preview': + from ..v2023_10_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-11-01': + from ..v2023_11_01.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2023-11-02-preview': + from ..v2023_11_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-01-01': + from ..v2024_01_01.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-01-02-preview': + from ..v2024_01_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-02-01': + from ..v2024_02_01.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-02-02-preview': + from ..v2024_02_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-03-02-preview': + from ..v2024_03_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-04-02-preview': + from ..v2024_04_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-05-01': + from ..v2024_05_01.aio.operations import TrustedAccessRolesOperations as OperationClass + elif 
api_version == '2024-05-02-preview': + from ..v2024_05_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-06-02-preview': + from ..v2024_06_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass + elif api_version == '2024-07-01': + from ..v2024_07_01.aio.operations import TrustedAccessRolesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'trusted_access_roles'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + async def close(self): + await self._client.close() + async def __aenter__(self): + await self._client.__aenter__() + return self + async def __aexit__(self, *exc_details): + await self._client.__aexit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models.py new file mode 100644 index 00000000000..f3658f614ee --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models.py @@ -0,0 +1,11 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +from .v2019_04_01.models import * +from .v2019_04_30.models import * +from .v2022_09_02_preview.models import * +from .v2024_06_02_preview.models import * +from .v2024_07_01.models import * diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/py.typed b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/py.typed new file mode 100644 index 00000000000..e5aff4f83af --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/__init__.py new file mode 100644 index 00000000000..4f004a298ed --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/__init__.py @@ -0,0 +1,26 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._container_service_client import ContainerServiceClient +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ContainerServiceClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_configuration.py new file mode 100644 index 00000000000..8a2cfba3ad4 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_configuration.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class ContainerServiceClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for ContainerServiceClient. 
+ + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + :keyword api_version: Api Version. Default value is "2024-07-01". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + """ + + def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-07-01") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + 
self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = ARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_container_service_client.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_container_service_client.py new file mode 100644 index 00000000000..37b41101ae1 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_container_service_client.py @@ -0,0 +1,179 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING +from typing_extensions import Self + +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse +from azure.mgmt.core import ARMPipelineClient +from azure.mgmt.core.policies import ARMAutoResourceProviderRegistrationPolicy + +from . 
import models as _models +from .._serialization import Deserializer, Serializer +from ._configuration import ContainerServiceClientConfiguration +from .operations import ( + AgentPoolsOperations, + MachinesOperations, + MaintenanceConfigurationsOperations, + ManagedClustersOperations, + Operations, + PrivateEndpointConnectionsOperations, + PrivateLinkResourcesOperations, + ResolvePrivateLinkServiceIdOperations, + SnapshotsOperations, + TrustedAccessRoleBindingsOperations, + TrustedAccessRolesOperations, +) + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class ContainerServiceClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes + """The Container Service Client. + + :ivar operations: Operations operations + :vartype operations: azure.mgmt.containerservice.v2024_07_01.operations.Operations + :ivar managed_clusters: ManagedClustersOperations operations + :vartype managed_clusters: + azure.mgmt.containerservice.v2024_07_01.operations.ManagedClustersOperations + :ivar maintenance_configurations: MaintenanceConfigurationsOperations operations + :vartype maintenance_configurations: + azure.mgmt.containerservice.v2024_07_01.operations.MaintenanceConfigurationsOperations + :ivar agent_pools: AgentPoolsOperations operations + :vartype agent_pools: azure.mgmt.containerservice.v2024_07_01.operations.AgentPoolsOperations + :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations + :vartype private_endpoint_connections: + azure.mgmt.containerservice.v2024_07_01.operations.PrivateEndpointConnectionsOperations + :ivar private_link_resources: PrivateLinkResourcesOperations operations + :vartype private_link_resources: + azure.mgmt.containerservice.v2024_07_01.operations.PrivateLinkResourcesOperations + :ivar resolve_private_link_service_id: ResolvePrivateLinkServiceIdOperations operations + :vartype resolve_private_link_service_id: + 
azure.mgmt.containerservice.v2024_07_01.operations.ResolvePrivateLinkServiceIdOperations + :ivar snapshots: SnapshotsOperations operations + :vartype snapshots: azure.mgmt.containerservice.v2024_07_01.operations.SnapshotsOperations + :ivar trusted_access_role_bindings: TrustedAccessRoleBindingsOperations operations + :vartype trusted_access_role_bindings: + azure.mgmt.containerservice.v2024_07_01.operations.TrustedAccessRoleBindingsOperations + :ivar trusted_access_roles: TrustedAccessRolesOperations operations + :vartype trusted_access_roles: + azure.mgmt.containerservice.v2024_07_01.operations.TrustedAccessRolesOperations + :ivar machines: MachinesOperations operations + :vartype machines: azure.mgmt.containerservice.v2024_07_01.operations.MachinesOperations + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + :param base_url: Service URL. Default value is "https://management.azure.com". + :type base_url: str + :keyword api_version: Api Version. Default value is "2024-07-01". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ """ + + def __init__( + self, + credential: "TokenCredential", + subscription_id: str, + base_url: str = "https://management.azure.com", + **kwargs: Any + ) -> None: + self._config = ContainerServiceClientConfiguration( + credential=credential, subscription_id=subscription_id, **kwargs + ) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + ARMAutoResourceProviderRegistrationPolicy(), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, policies=_policies, **kwargs) + + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.operations = Operations(self._client, self._config, self._serialize, self._deserialize, "2024-07-01") + self.managed_clusters = ManagedClustersOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.maintenance_configurations = MaintenanceConfigurationsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.agent_pools = AgentPoolsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.private_endpoint_connections = PrivateEndpointConnectionsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.private_link_resources = 
PrivateLinkResourcesOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.resolve_private_link_service_id = ResolvePrivateLinkServiceIdOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.snapshots = SnapshotsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.trusted_access_role_bindings = TrustedAccessRoleBindingsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.trusted_access_roles = TrustedAccessRolesOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.machines = MachinesOperations(self._client, self._config, self._serialize, self._deserialize, "2024-07-01") + + def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> Self: + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_version.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_version.py new file mode 100644 index 00000000000..cf831534071 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "32.0.0" diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/__init__.py new file mode 100644 index 00000000000..d14e96ddb36 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._container_service_client import ContainerServiceClient + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ContainerServiceClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_configuration.py new file mode 100644 index 00000000000..b68fcb866fc --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_configuration.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy + +from .._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class ContainerServiceClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for ContainerServiceClient. + + Note that all parameters used to create this instance are saved as instance + attributes. 
+ + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + :keyword api_version: Api Version. Default value is "2024-07-01". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + """ + + def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-07-01") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) 
+ self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = AsyncARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_container_service_client.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_container_service_client.py new file mode 100644 index 00000000000..5d4c2c0b25c --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_container_service_client.py @@ -0,0 +1,182 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, TYPE_CHECKING +from typing_extensions import Self + +from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.mgmt.core import AsyncARMPipelineClient +from azure.mgmt.core.policies import AsyncARMAutoResourceProviderRegistrationPolicy + +from .. 
import models as _models +from ..._serialization import Deserializer, Serializer +from ._configuration import ContainerServiceClientConfiguration +from .operations import ( + AgentPoolsOperations, + MachinesOperations, + MaintenanceConfigurationsOperations, + ManagedClustersOperations, + Operations, + PrivateEndpointConnectionsOperations, + PrivateLinkResourcesOperations, + ResolvePrivateLinkServiceIdOperations, + SnapshotsOperations, + TrustedAccessRoleBindingsOperations, + TrustedAccessRolesOperations, +) + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class ContainerServiceClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes + """The Container Service Client. + + :ivar operations: Operations operations + :vartype operations: azure.mgmt.containerservice.v2024_07_01.aio.operations.Operations + :ivar managed_clusters: ManagedClustersOperations operations + :vartype managed_clusters: + azure.mgmt.containerservice.v2024_07_01.aio.operations.ManagedClustersOperations + :ivar maintenance_configurations: MaintenanceConfigurationsOperations operations + :vartype maintenance_configurations: + azure.mgmt.containerservice.v2024_07_01.aio.operations.MaintenanceConfigurationsOperations + :ivar agent_pools: AgentPoolsOperations operations + :vartype agent_pools: + azure.mgmt.containerservice.v2024_07_01.aio.operations.AgentPoolsOperations + :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations + :vartype private_endpoint_connections: + azure.mgmt.containerservice.v2024_07_01.aio.operations.PrivateEndpointConnectionsOperations + :ivar private_link_resources: PrivateLinkResourcesOperations operations + :vartype private_link_resources: + azure.mgmt.containerservice.v2024_07_01.aio.operations.PrivateLinkResourcesOperations + :ivar resolve_private_link_service_id: ResolvePrivateLinkServiceIdOperations operations + :vartype 
resolve_private_link_service_id: + azure.mgmt.containerservice.v2024_07_01.aio.operations.ResolvePrivateLinkServiceIdOperations + :ivar snapshots: SnapshotsOperations operations + :vartype snapshots: azure.mgmt.containerservice.v2024_07_01.aio.operations.SnapshotsOperations + :ivar trusted_access_role_bindings: TrustedAccessRoleBindingsOperations operations + :vartype trusted_access_role_bindings: + azure.mgmt.containerservice.v2024_07_01.aio.operations.TrustedAccessRoleBindingsOperations + :ivar trusted_access_roles: TrustedAccessRolesOperations operations + :vartype trusted_access_roles: + azure.mgmt.containerservice.v2024_07_01.aio.operations.TrustedAccessRolesOperations + :ivar machines: MachinesOperations operations + :vartype machines: azure.mgmt.containerservice.v2024_07_01.aio.operations.MachinesOperations + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + :param base_url: Service URL. Default value is "https://management.azure.com". + :type base_url: str + :keyword api_version: Api Version. Default value is "2024-07-01". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + base_url: str = "https://management.azure.com", + **kwargs: Any + ) -> None: + self._config = ContainerServiceClientConfiguration( + credential=credential, subscription_id=subscription_id, **kwargs + ) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + AsyncARMAutoResourceProviderRegistrationPolicy(), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, policies=_policies, **kwargs) + + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.operations = Operations(self._client, self._config, self._serialize, self._deserialize, "2024-07-01") + self.managed_clusters = ManagedClustersOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.maintenance_configurations = MaintenanceConfigurationsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.agent_pools = AgentPoolsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.private_endpoint_connections = PrivateEndpointConnectionsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + 
self.private_link_resources = PrivateLinkResourcesOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.resolve_private_link_service_id = ResolvePrivateLinkServiceIdOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.snapshots = SnapshotsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.trusted_access_role_bindings = TrustedAccessRoleBindingsOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.trusted_access_roles = TrustedAccessRolesOperations( + self._client, self._config, self._serialize, self._deserialize, "2024-07-01" + ) + self.machines = MachinesOperations(self._client, self._config, self._serialize, self._deserialize, "2024-07-01") + + def _send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/__init__.py new file mode 100644 index 00000000000..d59e9e8a9f1 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/__init__.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._operations import Operations +from ._managed_clusters_operations import ManagedClustersOperations +from ._maintenance_configurations_operations import MaintenanceConfigurationsOperations +from ._agent_pools_operations import AgentPoolsOperations +from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations +from ._private_link_resources_operations import PrivateLinkResourcesOperations +from ._resolve_private_link_service_id_operations import ResolvePrivateLinkServiceIdOperations +from ._snapshots_operations import SnapshotsOperations +from ._trusted_access_role_bindings_operations import TrustedAccessRoleBindingsOperations +from ._trusted_access_roles_operations import TrustedAccessRolesOperations +from ._machines_operations import MachinesOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "Operations", + "ManagedClustersOperations", + "MaintenanceConfigurationsOperations", + "AgentPoolsOperations", + "PrivateEndpointConnectionsOperations", + "PrivateLinkResourcesOperations", + "ResolvePrivateLinkServiceIdOperations", + "SnapshotsOperations", + "TrustedAccessRoleBindingsOperations", + "TrustedAccessRolesOperations", + "MachinesOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_agent_pools_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_agent_pools_operations.py new file mode 100644 index 00000000000..509be147632 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_agent_pools_operations.py @@ -0,0 +1,1134 @@ 
+# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, Type, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... 
import models as _models +from ...operations._agent_pools_operations import ( + build_abort_latest_operation_request, + build_create_or_update_request, + build_delete_machines_request, + build_delete_request, + build_get_available_agent_pool_versions_request, + build_get_request, + build_get_upgrade_profile_request, + build_list_request, + build_upgrade_node_image_version_request, +) + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class AgentPoolsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`agent_pools` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + async def _abort_latest_operation_initial( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + 
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_abort_latest_operation_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["location"] = self._deserialize("str", response.headers.get("location")) + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_abort_latest_operation( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Aborts last operation running on agent pool. + + Aborts the currently running operation on the agent pool. 
The Agent Pool will be moved to a + Canceling state and eventually to a Canceled state when cancellation finishes. If the operation + completes before cancellation can take place, a 409 error code is returned. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._abort_latest_operation_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + 
polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterable["_models.AgentPool"]: + """Gets a list of agent pools in the specified managed cluster. + + Gets a list of agent pools in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An iterator like instance of either AgentPool or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.AgentPoolListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, 
+ params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("AgentPoolListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> _models.AgentPool: + """Gets the specified managed cluster agent pool. + + Gets the specified managed cluster agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. 
Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :return: AgentPool or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("AgentPool", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + parameters: Union[_models.AgentPool, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + 
error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "AgentPool") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return 
cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + parameters: _models.AgentPool, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AgentPool]: + """Creates or updates an agent pool in the specified managed cluster. + + Creates or updates an agent pool in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param parameters: The agent pool to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either AgentPool or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AgentPool]: + """Creates or updates an agent pool in the specified managed cluster. + + Creates or updates an agent pool in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param parameters: The agent pool to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either AgentPool or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + parameters: Union[_models.AgentPool, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.AgentPool]: + """Creates or updates an agent pool in the specified managed cluster. + + Creates or updates an agent pool in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param parameters: The agent pool to create or update. Is either a AgentPool type or a + IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool or IO[bytes] + :return: An instance of AsyncLROPoller that returns either AgentPool or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("AgentPool", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.AgentPool].from_continuation_token( + 
polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.AgentPool]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _delete_initial( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", 
response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes an agent pool in the specified managed cluster. + + Deletes an agent pool in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def 
get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace_async + async def get_upgrade_profile( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> _models.AgentPoolUpgradeProfile: + """Gets the upgrade profile for an agent pool. + + Gets the upgrade profile for an agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. 
+ :type agent_pool_name: str + :return: AgentPoolUpgradeProfile or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.AgentPoolUpgradeProfile] = kwargs.pop("cls", None) + + _request = build_get_upgrade_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("AgentPoolUpgradeProfile", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _delete_machines_initial( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: 
MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(machines, (IOBase, bytes)): + _content = machines + else: + _json = self._serialize.body(machines, "AgentPoolDeleteMachinesParameter") + + _request = build_delete_machines_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + 
response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_delete_machines( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + machines: _models.AgentPoolDeleteMachinesParameter, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes specific machines in an agent pool. + + Deletes specific machines in an agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param machines: A list of machines from the agent pool to be deleted. Required. + :type machines: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolDeleteMachinesParameter + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_delete_machines( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + machines: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes specific machines in an agent pool. + + Deletes specific machines in an agent pool. + + :param resource_group_name: The name of the resource group. 
The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param machines: A list of machines from the agent pool to be deleted. Required. + :type machines: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_delete_machines( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes specific machines in an agent pool. + + Deletes specific machines in an agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param machines: A list of machines from the agent pool to be deleted. Is either a + AgentPoolDeleteMachinesParameter type or a IO[bytes] type. Required. 
+ :type machines: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolDeleteMachinesParameter or IO[bytes] + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_machines_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + machines=machines, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return 
AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    @distributed_trace_async
+    async def get_available_agent_pool_versions(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> _models.AgentPoolAvailableVersions:
+        """Gets a list of supported Kubernetes versions for the specified agent pool.
+
+        See `supported Kubernetes versions
+        <https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about
+        the version lifecycle.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: AgentPoolAvailableVersions or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolAvailableVersions
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01"))
+        cls: ClsType[_models.AgentPoolAvailableVersions] = kwargs.pop("cls", None)
+
+        _request = build_get_available_agent_pool_versions_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if 
response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("AgentPoolAvailableVersions", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _upgrade_node_image_version_initial( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_upgrade_node_image_version_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise 
HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_upgrade_node_image_version( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> AsyncLROPoller[_models.AgentPool]: + """Upgrades the node image version of an agent pool to the latest. + + Upgrading the node image version of an agent pool applies the newest OS and runtime updates to + the nodes. AKS provides one new image per week with the latest updates. For more details on + node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. 
+ :type agent_pool_name: str + :return: An instance of AsyncLROPoller that returns either AgentPool or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._upgrade_node_image_version_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = self._deserialize("AgentPool", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.AgentPool].from_continuation_token( + 
polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.AgentPool]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_machines_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_machines_operations.py new file mode 100644 index 00000000000..d52176e2962 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_machines_operations.py @@ -0,0 +1,212 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import sys +from typing import Any, AsyncIterable, Callable, Dict, Optional, Type, TypeVar +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models as _models +from ...operations._machines_operations import build_get_request, build_list_request + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class MachinesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`machines` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> AsyncIterable["_models.Machine"]: + """Gets a list of machines in the specified agent pool. + + Gets a list of machines in the specified agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. 
+ :type agent_pool_name: str + :return: An iterator like instance of either Machine or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Machine] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MachineListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("MachineListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore 
+ return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, machine_name: str, **kwargs: Any + ) -> _models.Machine: + """Get a specific machine in the specified agent pool. + + Get a specific machine in the specified agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param machine_name: host name of the machine. Required. 
+ :type machine_name: str + :return: Machine or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Machine + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.Machine] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + machine_name=machine_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Machine", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_maintenance_configurations_operations.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_maintenance_configurations_operations.py new file mode 100644 index 00000000000..aab6a21a3cc --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_maintenance_configurations_operations.py @@ -0,0 +1,418 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... 
import models as _models +from ...operations._maintenance_configurations_operations import ( + build_create_or_update_request, + build_delete_request, + build_get_request, + build_list_by_managed_cluster_request, +) + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class MaintenanceConfigurationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`maintenance_configurations` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list_by_managed_cluster( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncIterable["_models.MaintenanceConfiguration"]: + """Gets a list of maintenance configurations in the specified managed cluster. + + Gets a list of maintenance configurations in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: An iterator like instance of either MaintenanceConfiguration or the result of + cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MaintenanceConfigurationListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_by_managed_cluster_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("MaintenanceConfigurationListResult", pipeline_response) + list_of_elem = deserialized.value + if 
cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get( + self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any + ) -> _models.MaintenanceConfiguration: + """Gets the specified maintenance configuration of a managed cluster. + + Gets the specified maintenance configuration of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. 
+ :type config_name: str + :return: MaintenanceConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + config_name=config_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_or_update( + self, + resource_group_name: str, + resource_name: str, + config_name: str, + parameters: _models.MaintenanceConfiguration, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.MaintenanceConfiguration: + 
"""Creates or updates a maintenance configuration in the specified managed cluster. + + Creates or updates a maintenance configuration in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. + :type config_name: str + :param parameters: The maintenance configuration to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: MaintenanceConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + resource_group_name: str, + resource_name: str, + config_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.MaintenanceConfiguration: + """Creates or updates a maintenance configuration in the specified managed cluster. + + Creates or updates a maintenance configuration in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. + :type config_name: str + :param parameters: The maintenance configuration to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: MaintenanceConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_or_update( + self, + resource_group_name: str, + resource_name: str, + config_name: str, + parameters: Union[_models.MaintenanceConfiguration, IO[bytes]], + **kwargs: Any + ) -> _models.MaintenanceConfiguration: + """Creates or updates a maintenance configuration in the specified managed cluster. + + Creates or updates a maintenance configuration in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. + :type config_name: str + :param parameters: The maintenance configuration to create or update. Is either a + MaintenanceConfiguration type or a IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration or + IO[bytes] + :return: MaintenanceConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "MaintenanceConfiguration") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + config_name=config_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, 
error_format=ARMErrorFormat) + + deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any + ) -> None: + """Deletes a maintenance configuration. + + Deletes a maintenance configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. + :type config_name: str + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + config_name=config_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_managed_clusters_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_managed_clusters_operations.py new file mode 100644 index 00000000000..1b6b663949f --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_managed_clusters_operations.py @@ -0,0 +1,2822 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, Type, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... 
import models as _models +from ...operations._managed_clusters_operations import ( + build_abort_latest_operation_request, + build_create_or_update_request, + build_delete_request, + build_get_access_profile_request, + build_get_command_result_request, + build_get_mesh_revision_profile_request, + build_get_mesh_upgrade_profile_request, + build_get_request, + build_get_upgrade_profile_request, + build_list_by_resource_group_request, + build_list_cluster_admin_credentials_request, + build_list_cluster_monitoring_user_credentials_request, + build_list_cluster_user_credentials_request, + build_list_kubernetes_versions_request, + build_list_mesh_revision_profiles_request, + build_list_mesh_upgrade_profiles_request, + build_list_outbound_network_dependencies_endpoints_request, + build_list_request, + build_reset_aad_profile_request, + build_reset_service_principal_profile_request, + build_rotate_cluster_certificates_request, + build_rotate_service_account_signing_keys_request, + build_run_command_request, + build_start_request, + build_stop_request, + build_update_tags_request, +) + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class ManagedClustersOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`managed_clusters` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace_async + async def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.KubernetesVersionListResult: + """Gets a list of supported Kubernetes versions in the specified subscription. + + Contains extra metadata on the version, including supported patch versions, capabilities, + available upgrades, and details on preview status of the version. + + :param location: The name of the Azure region. Required. + :type location: str + :return: KubernetesVersionListResult or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersionListResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.KubernetesVersionListResult] = kwargs.pop("cls", None) + + _request = build_list_kubernetes_versions_request( + location=location, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = 
False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("KubernetesVersionListResult", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list(self, **kwargs: Any) -> AsyncIterable["_models.ManagedCluster"]: + """Gets a list of managed clusters in the specified subscription. + + Gets a list of managed clusters in the specified subscription. + + :return: An iterator like instance of either ManagedCluster or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's 
api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("ManagedClusterListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace + def list_by_resource_group( + self, resource_group_name: str, **kwargs: Any + ) -> AsyncIterable["_models.ManagedCluster"]: + """Lists managed clusters in the specified subscription and resource group. + + Lists managed clusters in the specified subscription and resource group. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :return: An iterator like instance of either ManagedCluster or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("ManagedClusterListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return 
deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get_upgrade_profile( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> _models.ManagedClusterUpgradeProfile: + """Gets the upgrade profile of a managed cluster. + + Gets the upgrade profile of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: ManagedClusterUpgradeProfile or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterUpgradeProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedClusterUpgradeProfile] = kwargs.pop("cls", None) + + _request = build_get_upgrade_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("ManagedClusterUpgradeProfile", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_access_profile( + self, resource_group_name: str, resource_name: str, role_name: str, **kwargs: Any + ) -> _models.ManagedClusterAccessProfile: + """Gets an access profile of a managed cluster. 
+ + **WARNING**\\ : This API will be deprecated. Instead use `ListClusterUserCredentials + `_ or + `ListClusterAdminCredentials + `_ . + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param role_name: The name of the role for managed cluster accessProfile resource. Required. + :type role_name: str + :return: ManagedClusterAccessProfile or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAccessProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedClusterAccessProfile] = kwargs.pop("cls", None) + + _request = build_get_access_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + role_name=role_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + 
deserialized = self._deserialize("ManagedClusterAccessProfile", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_cluster_admin_credentials( + self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any + ) -> _models.CredentialResults: + """Lists the admin credentials of a managed cluster. + + Lists the admin credentials of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. + :type server_fqdn: str + :return: CredentialResults or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) + + _request = build_list_cluster_admin_credentials_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + server_fqdn=server_fqdn, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: 
PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_cluster_user_credentials( + self, + resource_group_name: str, + resource_name: str, + server_fqdn: Optional[str] = None, + format: Optional[Union[str, _models.Format]] = None, + **kwargs: Any + ) -> _models.CredentialResults: + """Lists the user credentials of a managed cluster. + + Lists the user credentials of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. + :type server_fqdn: str + :param format: Only apply to AAD clusters, specifies the format of returned kubeconfig. Format + 'azure' will return azure auth-provider kubeconfig; format 'exec' will return exec format + kubeconfig, which requires kubelogin binary in the path. Known values are: "azure", "exec", and + "exec". Default value is None. 
+ :type format: str or ~azure.mgmt.containerservice.v2024_07_01.models.Format + :return: CredentialResults or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) + + _request = build_list_cluster_user_credentials_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + server_fqdn=server_fqdn, + format=format, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_cluster_monitoring_user_credentials( + self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any + ) -> 
_models.CredentialResults: + """Lists the cluster monitoring user credentials of a managed cluster. + + Lists the cluster monitoring user credentials of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. + :type server_fqdn: str + :return: CredentialResults or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) + + _request = build_list_cluster_monitoring_user_credentials_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + server_fqdn=server_fqdn, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, 
error_format=ARMErrorFormat) + + deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.ManagedCluster: + """Gets a managed cluster. + + Gets a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: ManagedCluster or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedCluster, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "ManagedCluster") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [200, 201]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.ManagedCluster, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.ManagedCluster]: + """Creates or updates a managed cluster. + + Creates or updates a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The managed cluster to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.ManagedCluster]: + """Creates or updates a managed cluster. + + Creates or updates a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The managed cluster to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedCluster, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.ManagedCluster]: + """Creates or updates a managed cluster. + + Creates or updates a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :param parameters: The managed cluster to create or update. Is either a ManagedCluster type or + a IO[bytes] type. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster or IO[bytes] + :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + 
else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.ManagedCluster].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.ManagedCluster]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _update_tags_initial( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.TagsObject, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "TagsObject") + + _request = build_update_tags_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + 
_request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.TagsObject, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.ManagedCluster]: + """Updates tags on a managed cluster. + + Updates tags on a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.ManagedCluster]: + """Updates tags on a managed cluster. + + Updates tags on a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.TagsObject, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.ManagedCluster]: + """Updates tags on a managed cluster. + + Updates tags on a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Is either + a TagsObject type or a IO[bytes] type. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject or IO[bytes] + :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_tags_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, 
AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.ManagedCluster].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.ManagedCluster]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _delete_initial( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} 
+ if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Deletes a managed cluster. + + Deletes a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + 
if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _reset_service_principal_profile_initial( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "ManagedClusterServicePrincipalProfile") + + _request = build_reset_service_principal_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + 
headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_reset_service_principal_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.ManagedClusterServicePrincipalProfile, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Reset the Service Principal Profile of a managed cluster. + + This action cannot be performed on a cluster that is not using a service principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The service principal profile to set on the managed cluster. Required. + :type parameters: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_reset_service_principal_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Reset the Service Principal Profile of a managed cluster. + + This action cannot be performed on a cluster that is not using a service principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The service principal profile to set on the managed cluster. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_reset_service_principal_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Reset the Service Principal Profile of a managed cluster. + + This action cannot be performed on a cluster that is not using a service principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The service principal profile to set on the managed cluster. Is either a + ManagedClusterServicePrincipalProfile type or a IO[bytes] type. Required. + :type parameters: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile or + IO[bytes] + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._reset_service_principal_profile_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) 
+ elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _reset_aad_profile_initial( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "ManagedClusterAADProfile") + + _request = build_reset_aad_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: 
PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_reset_aad_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.ManagedClusterAADProfile, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Reset the AAD Profile of a managed cluster. + + **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory + integration `_ to update your cluster with AKS-managed Azure + AD. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The AAD profile to set on the Managed Cluster. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_reset_aad_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Reset the AAD Profile of a managed cluster. + + **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory + integration `_ to update your cluster with AKS-managed Azure + AD. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The AAD profile to set on the Managed Cluster. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_reset_aad_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Reset the AAD Profile of a managed cluster. + + **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory + integration `_ to update your cluster with AKS-managed Azure + AD. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The AAD profile to set on the Managed Cluster. Is either a + ManagedClusterAADProfile type or a IO[bytes] type. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile or + IO[bytes] + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._reset_aad_profile_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = 
cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _rotate_cluster_certificates_initial( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_rotate_cluster_certificates_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) 
+ + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_rotate_cluster_certificates( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Rotates the certificates of a managed cluster. + + See `Certificate rotation `_ for + more details about rotating managed cluster certificates. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._rotate_cluster_certificates_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def 
get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _abort_latest_operation_initial( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_abort_latest_operation_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["location"] = self._deserialize("str", response.headers.get("location")) + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_abort_latest_operation( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Aborts last operation running on managed cluster. + + Aborts the currently running operation on the managed cluster. The Managed Cluster will be + moved to a Canceling state and eventually to a Canceled state when cancellation finishes. If + the operation completes before cancellation can take place, a 409 error code is returned. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._abort_latest_operation_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _rotate_service_account_signing_keys_initial( # pylint: disable=name-too-long + self, resource_group_name: str, 
resource_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_rotate_service_account_signing_keys_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_rotate_service_account_signing_keys( # pylint: 
disable=name-too-long + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Rotates the service account signing keys of a managed cluster. + + Rotates the service account signing keys of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._rotate_service_account_signing_keys_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + 
polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _stop_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_stop_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = 
self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_stop(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Stops a Managed Cluster. + + This can only be performed on Azure Virtual Machine Scale set backed clusters. Stopping a + cluster stops the control plane and agent nodes entirely, while maintaining all object and + cluster state. A cluster does not accrue charges while it is stopped. See `stopping a cluster + `_ for more details about stopping a + cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._stop_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _start_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterator[bytes]: + error_map: 
MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_start_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_start(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Starts a previously stopped Managed Cluster. 
+ + See `starting a cluster `_ for more + details about starting a cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._start_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + 
deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _run_command_initial( + self, + resource_group_name: str, + resource_name: str, + request_payload: Union[_models.RunCommandRequest, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(request_payload, (IOBase, bytes)): + _content = request_payload + else: + _json = self._serialize.body(request_payload, "RunCommandRequest") + + _request = build_run_command_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + await response.read() # Load the body in memory and close the 
socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_run_command( + self, + resource_group_name: str, + resource_name: str, + request_payload: _models.RunCommandRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.RunCommandResult]: + """Submits a command to run against the Managed Cluster. + + AKS will create a pod to run the command. This is primarily useful for private clusters. For + more information see `AKS Run Command + `_. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param request_payload: The run command request. Required. + :type request_payload: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_run_command( + self, + resource_group_name: str, + resource_name: str, + request_payload: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.RunCommandResult]: + """Submits a command to run against the Managed Cluster. + + AKS will create a pod to run the command. This is primarily useful for private clusters. For + more information see `AKS Run Command + `_. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param request_payload: The run command request. Required. + :type request_payload: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_run_command( + self, + resource_group_name: str, + resource_name: str, + request_payload: Union[_models.RunCommandRequest, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.RunCommandResult]: + """Submits a command to run against the Managed Cluster. + + AKS will create a pod to run the command. This is primarily useful for private clusters. For + more information see `AKS Run Command + `_. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param request_payload: The run command request. Is either a RunCommandRequest type or a + IO[bytes] type. Required. + :type request_payload: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandRequest or + IO[bytes] + :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.RunCommandResult] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._run_command_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + request_payload=request_payload, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("RunCommandResult", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + 
return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.RunCommandResult].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.RunCommandResult]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + @distributed_trace_async + async def get_command_result( + self, resource_group_name: str, resource_name: str, command_id: str, **kwargs: Any + ) -> Optional[_models.RunCommandResult]: + """Gets the results of a command which has been run on the Managed Cluster. + + Gets the results of a command which has been run on the Managed Cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param command_id: Id of the command. Required. 
+ :type command_id: str + :return: RunCommandResult or None or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult or None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[Optional[_models.RunCommandResult]] = kwargs.pop("cls", None) + + _request = build_get_command_result_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + command_id=command_id, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + response_headers = {} + if response.status_code == 200: + deserialized = self._deserialize("RunCommandResult", pipeline_response.http_response) + + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def 
list_outbound_network_dependencies_endpoints( # pylint: disable=name-too-long + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncIterable["_models.OutboundEnvironmentEndpoint"]: + """Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the + specified managed cluster. + + Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the + specified managed cluster. The operation returns properties of each egress endpoint. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An iterator like instance of either OutboundEnvironmentEndpoint or the result of + cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.OutboundEnvironmentEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.OutboundEnvironmentEndpointCollection] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_outbound_network_dependencies_endpoints_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # 
make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("OutboundEnvironmentEndpointCollection", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace + def list_mesh_revision_profiles(self, location: str, **kwargs: Any) -> AsyncIterable["_models.MeshRevisionProfile"]: + """Lists mesh revision profiles for all meshes in the specified location. + + Contains extra metadata on each revision, including supported revisions, cluster compatibility + and available upgrades. + + :param location: The name of the Azure region. Required. 
+ :type location: str + :return: An iterator like instance of either MeshRevisionProfile or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MeshRevisionProfileList] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_mesh_revision_profiles_request( + location=location, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("MeshRevisionProfileList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or 
None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: Any) -> _models.MeshRevisionProfile: + """Gets a mesh revision profile for a specified mesh in the specified location. + + Contains extra metadata on the revision, including supported revisions, cluster compatibility + and available upgrades. + + :param location: The name of the Azure region. Required. + :type location: str + :param mode: The mode of the mesh. Required. 
+ :type mode: str + :return: MeshRevisionProfile or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MeshRevisionProfile] = kwargs.pop("cls", None) + + _request = build_get_mesh_revision_profile_request( + location=location, + mode=mode, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("MeshRevisionProfile", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_mesh_upgrade_profiles( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncIterable["_models.MeshUpgradeProfile"]: + """Lists available upgrades for all service meshes in a specific cluster. + + Lists available upgrades for all service meshes in a specific cluster. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An iterator like instance of either MeshUpgradeProfile or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MeshUpgradeProfileList] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_mesh_upgrade_profiles_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return 
_request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("MeshUpgradeProfileList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get_mesh_upgrade_profile( + self, resource_group_name: str, resource_name: str, mode: str, **kwargs: Any + ) -> _models.MeshUpgradeProfile: + """Gets available upgrades for a service mesh in a cluster. + + Gets available upgrades for a service mesh in a cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param mode: The mode of the mesh. Required. 
+ :type mode: str + :return: MeshUpgradeProfile or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MeshUpgradeProfile] = kwargs.pop("cls", None) + + _request = build_get_mesh_upgrade_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + mode=mode, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("MeshUpgradeProfile", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_operations.py new file mode 100644 index 
00000000000..c8177ce87bf --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_operations.py @@ -0,0 +1,133 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import sys +from typing import Any, AsyncIterable, Callable, Dict, Optional, Type, TypeVar +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models as _models +from ...operations._operations import build_list_request + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class Operations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
+ + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`operations` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list(self, **kwargs: Any) -> AsyncIterable["_models.OperationValue"]: + """Gets a list of operations. + + Gets a list of operations. + + :return: An iterator like instance of either OperationValue or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.OperationValue] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + 
_next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("OperationListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. 
+ +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_endpoint_connections_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_endpoint_connections_operations.py new file mode 100644 index 00000000000..2058d08a3f5 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_endpoint_connections_operations.py @@ -0,0 +1,446 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, AsyncIterator, Callable, Dict, IO, Optional, Type, TypeVar, Union, cast, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... import models as _models +from ...operations._private_endpoint_connections_operations import ( + build_delete_request, + build_get_request, + build_list_request, + build_update_request, +) + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class PrivateEndpointConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`private_endpoint_connections` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace_async + async def list( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> _models.PrivateEndpointConnectionListResult: + """Gets a list of private endpoint connections in the specified managed cluster. + + To learn more about private clusters, see: + https://docs.microsoft.com/azure/aks/private-clusters. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: PrivateEndpointConnectionListResult or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnectionListResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None) + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get( + self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Gets the specified private endpoint connection. 
+ + To learn more about private clusters, see: + https://docs.microsoft.com/azure/aks/private-clusters. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, 
error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update( + self, + resource_group_name: str, + resource_name: str, + private_endpoint_connection_name: str, + parameters: _models.PrivateEndpointConnection, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Updates a private endpoint connection. + + Updates a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param parameters: The updated private endpoint connection. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update( + self, + resource_group_name: str, + resource_name: str, + private_endpoint_connection_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Updates a private endpoint connection. + + Updates a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param parameters: The updated private endpoint connection. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update( + self, + resource_group_name: str, + resource_name: str, + private_endpoint_connection_name: str, + parameters: Union[_models.PrivateEndpointConnection, IO[bytes]], + **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Updates a private endpoint connection. + + Updates a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param parameters: The updated private endpoint connection. Is either a + PrivateEndpointConnection type or a IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection or + IO[bytes] + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "PrivateEndpointConnection") + + _request = build_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise 
HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _delete_initial( + self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = 
response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes a private endpoint connection. + + Deletes a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + private_endpoint_connection_name=private_endpoint_connection_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def 
get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_link_resources_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_link_resources_operations.py new file mode 100644 index 00000000000..91035e80000 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_link_resources_operations.py @@ -0,0 +1,115 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, Optional, Type, TypeVar + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models as _models +from ...operations._private_link_resources_operations import build_list_request + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class PrivateLinkResourcesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`private_link_resources` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace_async + async def list( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> _models.PrivateLinkResourcesListResult: + """Gets a list of private link resources in the specified managed cluster. + + To learn more about private clusters, see: + https://docs.microsoft.com/azure/aks/private-clusters. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: PrivateLinkResourcesListResult or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResourcesListResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None) + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_resolve_private_link_service_id_operations.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_resolve_private_link_service_id_operations.py new file mode 100644 index 00000000000..393a167f652 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_resolve_private_link_service_id_operations.py @@ -0,0 +1,193 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... 
import models as _models +from ...operations._resolve_private_link_service_id_operations import build_post_request + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class ResolvePrivateLinkServiceIdOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`resolve_private_link_service_id` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @overload + async def post( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.PrivateLinkResource, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PrivateLinkResource: + """Gets the private link service ID for the specified managed cluster. + + Gets the private link service ID for the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters required in order to resolve a private link service ID. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PrivateLinkResource or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def post( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PrivateLinkResource: + """Gets the private link service ID for the specified managed cluster. + + Gets the private link service ID for the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters required in order to resolve a private link service ID. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: PrivateLinkResource or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def post( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.PrivateLinkResource, IO[bytes]], + **kwargs: Any + ) -> _models.PrivateLinkResource: + """Gets the private link service ID for the specified managed cluster. + + Gets the private link service ID for the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. 
+ Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters required in order to resolve a private link service ID. Is either + a PrivateLinkResource type or a IO[bytes] type. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource or + IO[bytes] + :return: PrivateLinkResource or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateLinkResource] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "PrivateLinkResource") + + _request = build_post_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, 
**kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateLinkResource", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_snapshots_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_snapshots_operations.py new file mode 100644 index 00000000000..aaece305004 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_snapshots_operations.py @@ -0,0 +1,608 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models as _models +from ...operations._snapshots_operations import ( + build_create_or_update_request, + build_delete_request, + build_get_request, + build_list_by_resource_group_request, + build_list_request, + build_update_tags_request, +) + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class SnapshotsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`snapshots` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list(self, **kwargs: Any) -> AsyncIterable["_models.Snapshot"]: + """Gets a list of snapshots in the specified subscription. + + Gets a list of snapshots in the specified subscription. + + :return: An iterator like instance of either Snapshot or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in 
value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("SnapshotListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.Snapshot"]: + """Lists snapshots in the specified subscription and resource group. + + Lists snapshots in the specified subscription and resource group. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :return: An iterator like instance of either Snapshot or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("SnapshotListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, 
AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.Snapshot: + """Gets a snapshot. + + Gets a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = 
self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Snapshot", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.Snapshot, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Snapshot: + """Creates or updates a snapshot. + + Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The snapshot to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Snapshot: + """Creates or updates a snapshot. + + Creates or updates a snapshot. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The snapshot to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.Snapshot, IO[bytes]], + **kwargs: Any + ) -> _models.Snapshot: + """Creates or updates a snapshot. + + Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The snapshot to create or update. Is either a Snapshot type or a IO[bytes] + type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot or IO[bytes] + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "Snapshot") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Snapshot", pipeline_response.http_response) + + 
if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.TagsObject, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Snapshot: + """Updates tags on a snapshot. + + Updates tags on a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update snapshot Tags operation. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Snapshot: + """Updates tags on a snapshot. + + Updates tags on a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update snapshot Tags operation. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.TagsObject, IO[bytes]], + **kwargs: Any + ) -> _models.Snapshot: + """Updates tags on a snapshot. + + Updates tags on a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update snapshot Tags operation. Is either a + TagsObject type or a IO[bytes] type. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject or IO[bytes] + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "TagsObject") + 
+ _request = build_update_tags_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Snapshot", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> None: + """Deletes a snapshot. + + Deletes a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_role_bindings_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_role_bindings_operations.py new file mode 100644 index 00000000000..168cdaa7f99 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_role_bindings_operations.py @@ -0,0 
+1,549 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, Type, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... 
import models as _models +from ...operations._trusted_access_role_bindings_operations import ( + build_create_or_update_request, + build_delete_request, + build_get_request, + build_list_request, +) + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class TrustedAccessRoleBindingsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`trusted_access_role_bindings` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncIterable["_models.TrustedAccessRoleBinding"]: + """List trusted access role bindings. + + List trusted access role bindings. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: An iterator like instance of either TrustedAccessRoleBinding or the result of + cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.TrustedAccessRoleBindingListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("TrustedAccessRoleBindingListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = 
cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get( + self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any + ) -> _models.TrustedAccessRoleBinding: + """Get a trusted access role binding. + + Get a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. 
+ :type trusted_access_role_binding_name: str + :return: TrustedAccessRoleBinding or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, 
IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(trusted_access_role_binding, (IOBase, bytes)): + _content = trusted_access_role_binding + else: + _json = self._serialize.body(trusted_access_role_binding, "TrustedAccessRoleBinding") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + trusted_access_role_binding: _models.TrustedAccessRoleBinding, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.TrustedAccessRoleBinding]: + """Create or update a trusted access role binding. + + Create or update a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. + :type trusted_access_role_binding_name: str + :param trusted_access_role_binding: A trusted access role binding. Required. + :type trusted_access_role_binding: + ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either TrustedAccessRoleBinding or the + result of cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + trusted_access_role_binding: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.TrustedAccessRoleBinding]: + """Create or update a trusted access role binding. + + Create or update a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. + :type trusted_access_role_binding_name: str + :param trusted_access_role_binding: A trusted access role binding. Required. + :type trusted_access_role_binding: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either TrustedAccessRoleBinding or the + result of cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.TrustedAccessRoleBinding]: + """Create or update a trusted access role binding. + + Create or update a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. + :type trusted_access_role_binding_name: str + :param trusted_access_role_binding: A trusted access role binding. Is either a + TrustedAccessRoleBinding type or a IO[bytes] type. Required. 
+ :type trusted_access_role_binding: + ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding or IO[bytes] + :return: An instance of AsyncLROPoller that returns either TrustedAccessRoleBinding or the + result of cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + trusted_access_role_binding=trusted_access_role_binding, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, 
AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.TrustedAccessRoleBinding].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.TrustedAccessRoleBinding]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _delete_initial( + self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Delete a trusted access role binding. + + Delete a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. 
+ :type trusted_access_role_binding_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore diff --git 
a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_roles_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_roles_operations.py new file mode 100644 index 00000000000..c00f5e79d57 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_roles_operations.py @@ -0,0 +1,137 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import sys +from typing import Any, AsyncIterable, Callable, Dict, Optional, Type, TypeVar +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... 
import models as _models +from ...operations._trusted_access_roles_operations import build_list_request + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class TrustedAccessRolesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :attr:`trusted_access_roles` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.TrustedAccessRole"]: + """List supported trusted access roles. + + List supported trusted access roles. + + :param location: The name of the Azure region. Required. 
+ :type location: str + :return: An iterator like instance of either TrustedAccessRole or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRole] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + location=location, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("TrustedAccessRoleListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, 
AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/__init__.py new file mode 100644 index 00000000000..8b172e7eb5c --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/__init__.py @@ -0,0 +1,407 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._models_py3 import AbsoluteMonthlySchedule +from ._models_py3 import AgentPool +from ._models_py3 import AgentPoolAvailableVersions +from ._models_py3 import AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem +from ._models_py3 import AgentPoolDeleteMachinesParameter +from ._models_py3 import AgentPoolListResult +from ._models_py3 import AgentPoolNetworkProfile +from ._models_py3 import AgentPoolSecurityProfile +from ._models_py3 import AgentPoolUpgradeProfile +from ._models_py3 import AgentPoolUpgradeProfilePropertiesUpgradesItem +from ._models_py3 import AgentPoolUpgradeSettings +from ._models_py3 import AgentPoolWindowsProfile +from ._models_py3 import AzureKeyVaultKms +from ._models_py3 import CloudErrorBody +from ._models_py3 import ClusterUpgradeSettings +from ._models_py3 import CompatibleVersions +from ._models_py3 import ContainerServiceLinuxProfile +from ._models_py3 import ContainerServiceNetworkProfile +from ._models_py3 import ContainerServiceSshConfiguration +from ._models_py3 import ContainerServiceSshPublicKey +from ._models_py3 import CreationData +from ._models_py3 import CredentialResult +from ._models_py3 import CredentialResults +from ._models_py3 import DailySchedule +from ._models_py3 import DateSpan +from ._models_py3 import DelegatedResource +from ._models_py3 import EndpointDependency +from ._models_py3 import EndpointDetail +from ._models_py3 import ErrorAdditionalInfo +from ._models_py3 import ErrorDetail +from ._models_py3 import ErrorResponse +from ._models_py3 import ExtendedLocation +from ._models_py3 import IPTag +from ._models_py3 import IstioCertificateAuthority +from ._models_py3 import IstioComponents +from ._models_py3 import IstioEgressGateway +from ._models_py3 import IstioIngressGateway +from ._models_py3 import IstioPluginCertificateAuthority +from ._models_py3 import IstioServiceMesh +from ._models_py3 import KubeletConfig +from 
._models_py3 import KubernetesPatchVersion +from ._models_py3 import KubernetesVersion +from ._models_py3 import KubernetesVersionCapabilities +from ._models_py3 import KubernetesVersionListResult +from ._models_py3 import LinuxOSConfig +from ._models_py3 import Machine +from ._models_py3 import MachineIpAddress +from ._models_py3 import MachineListResult +from ._models_py3 import MachineNetworkProperties +from ._models_py3 import MachineProperties +from ._models_py3 import MaintenanceConfiguration +from ._models_py3 import MaintenanceConfigurationListResult +from ._models_py3 import MaintenanceWindow +from ._models_py3 import ManagedCluster +from ._models_py3 import ManagedClusterAADProfile +from ._models_py3 import ManagedClusterAPIServerAccessProfile +from ._models_py3 import ManagedClusterAccessProfile +from ._models_py3 import ManagedClusterAddonProfile +from ._models_py3 import ManagedClusterAddonProfileIdentity +from ._models_py3 import ManagedClusterAgentPoolProfile +from ._models_py3 import ManagedClusterAgentPoolProfileProperties +from ._models_py3 import ManagedClusterAutoUpgradeProfile +from ._models_py3 import ManagedClusterAzureMonitorProfile +from ._models_py3 import ManagedClusterAzureMonitorProfileKubeStateMetrics +from ._models_py3 import ManagedClusterAzureMonitorProfileMetrics +from ._models_py3 import ManagedClusterCostAnalysis +from ._models_py3 import ManagedClusterHTTPProxyConfig +from ._models_py3 import ManagedClusterIdentity +from ._models_py3 import ManagedClusterIngressProfile +from ._models_py3 import ManagedClusterIngressProfileWebAppRouting +from ._models_py3 import ManagedClusterListResult +from ._models_py3 import ManagedClusterLoadBalancerProfile +from ._models_py3 import ManagedClusterLoadBalancerProfileManagedOutboundIPs +from ._models_py3 import ManagedClusterLoadBalancerProfileOutboundIPPrefixes +from ._models_py3 import ManagedClusterLoadBalancerProfileOutboundIPs +from ._models_py3 import 
ManagedClusterManagedOutboundIPProfile +from ._models_py3 import ManagedClusterMetricsProfile +from ._models_py3 import ManagedClusterNATGatewayProfile +from ._models_py3 import ManagedClusterOIDCIssuerProfile +from ._models_py3 import ManagedClusterPodIdentity +from ._models_py3 import ManagedClusterPodIdentityException +from ._models_py3 import ManagedClusterPodIdentityProfile +from ._models_py3 import ManagedClusterPodIdentityProvisioningError +from ._models_py3 import ManagedClusterPodIdentityProvisioningErrorBody +from ._models_py3 import ManagedClusterPodIdentityProvisioningInfo +from ._models_py3 import ManagedClusterPoolUpgradeProfile +from ._models_py3 import ManagedClusterPoolUpgradeProfileUpgradesItem +from ._models_py3 import ManagedClusterPropertiesAutoScalerProfile +from ._models_py3 import ManagedClusterSKU +from ._models_py3 import ManagedClusterSecurityProfile +from ._models_py3 import ManagedClusterSecurityProfileDefender +from ._models_py3 import ManagedClusterSecurityProfileDefenderSecurityMonitoring +from ._models_py3 import ManagedClusterSecurityProfileImageCleaner +from ._models_py3 import ManagedClusterSecurityProfileWorkloadIdentity +from ._models_py3 import ManagedClusterServicePrincipalProfile +from ._models_py3 import ManagedClusterStorageProfile +from ._models_py3 import ManagedClusterStorageProfileBlobCSIDriver +from ._models_py3 import ManagedClusterStorageProfileDiskCSIDriver +from ._models_py3 import ManagedClusterStorageProfileFileCSIDriver +from ._models_py3 import ManagedClusterStorageProfileSnapshotController +from ._models_py3 import ManagedClusterUpgradeProfile +from ._models_py3 import ManagedClusterWindowsProfile +from ._models_py3 import ManagedClusterWorkloadAutoScalerProfile +from ._models_py3 import ManagedClusterWorkloadAutoScalerProfileKeda +from ._models_py3 import ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler +from ._models_py3 import ManagedServiceIdentityUserAssignedIdentitiesValue +from ._models_py3 
import MeshRevision +from ._models_py3 import MeshRevisionProfile +from ._models_py3 import MeshRevisionProfileList +from ._models_py3 import MeshRevisionProfileProperties +from ._models_py3 import MeshUpgradeProfile +from ._models_py3 import MeshUpgradeProfileList +from ._models_py3 import MeshUpgradeProfileProperties +from ._models_py3 import OperationListResult +from ._models_py3 import OperationValue +from ._models_py3 import OutboundEnvironmentEndpoint +from ._models_py3 import OutboundEnvironmentEndpointCollection +from ._models_py3 import PortRange +from ._models_py3 import PowerState +from ._models_py3 import PrivateEndpoint +from ._models_py3 import PrivateEndpointConnection +from ._models_py3 import PrivateEndpointConnectionListResult +from ._models_py3 import PrivateLinkResource +from ._models_py3 import PrivateLinkResourcesListResult +from ._models_py3 import PrivateLinkServiceConnectionState +from ._models_py3 import ProxyResource +from ._models_py3 import RelativeMonthlySchedule +from ._models_py3 import Resource +from ._models_py3 import ResourceReference +from ._models_py3 import RunCommandRequest +from ._models_py3 import RunCommandResult +from ._models_py3 import Schedule +from ._models_py3 import ServiceMeshProfile +from ._models_py3 import Snapshot +from ._models_py3 import SnapshotListResult +from ._models_py3 import SubResource +from ._models_py3 import SysctlConfig +from ._models_py3 import SystemData +from ._models_py3 import TagsObject +from ._models_py3 import TimeInWeek +from ._models_py3 import TimeSpan +from ._models_py3 import TrackedResource +from ._models_py3 import TrustedAccessRole +from ._models_py3 import TrustedAccessRoleBinding +from ._models_py3 import TrustedAccessRoleBindingListResult +from ._models_py3 import TrustedAccessRoleListResult +from ._models_py3 import TrustedAccessRoleRule +from ._models_py3 import UpgradeOverrideSettings +from ._models_py3 import UserAssignedIdentity +from ._models_py3 import WeeklySchedule 
+from ._models_py3 import WindowsGmsaProfile + +from ._container_service_client_enums import AgentPoolMode +from ._container_service_client_enums import AgentPoolType +from ._container_service_client_enums import BackendPoolType +from ._container_service_client_enums import Code +from ._container_service_client_enums import ConnectionStatus +from ._container_service_client_enums import CreatedByType +from ._container_service_client_enums import Expander +from ._container_service_client_enums import ExtendedLocationTypes +from ._container_service_client_enums import Format +from ._container_service_client_enums import GPUInstanceProfile +from ._container_service_client_enums import IpFamily +from ._container_service_client_enums import IstioIngressGatewayMode +from ._container_service_client_enums import KeyVaultNetworkAccessTypes +from ._container_service_client_enums import KubeletDiskType +from ._container_service_client_enums import KubernetesSupportPlan +from ._container_service_client_enums import LicenseType +from ._container_service_client_enums import LoadBalancerSku +from ._container_service_client_enums import ManagedClusterPodIdentityProvisioningState +from ._container_service_client_enums import ManagedClusterSKUName +from ._container_service_client_enums import ManagedClusterSKUTier +from ._container_service_client_enums import NetworkDataplane +from ._container_service_client_enums import NetworkMode +from ._container_service_client_enums import NetworkPlugin +from ._container_service_client_enums import NetworkPluginMode +from ._container_service_client_enums import NetworkPolicy +from ._container_service_client_enums import NodeOSUpgradeChannel +from ._container_service_client_enums import OSDiskType +from ._container_service_client_enums import OSSKU +from ._container_service_client_enums import OSType +from ._container_service_client_enums import OutboundType +from ._container_service_client_enums import PrivateEndpointConnectionProvisioningState 
+from ._container_service_client_enums import Protocol +from ._container_service_client_enums import PublicNetworkAccess +from ._container_service_client_enums import ResourceIdentityType +from ._container_service_client_enums import ScaleDownMode +from ._container_service_client_enums import ScaleSetEvictionPolicy +from ._container_service_client_enums import ScaleSetPriority +from ._container_service_client_enums import ServiceMeshMode +from ._container_service_client_enums import SnapshotType +from ._container_service_client_enums import TrustedAccessRoleBindingProvisioningState +from ._container_service_client_enums import Type +from ._container_service_client_enums import UpgradeChannel +from ._container_service_client_enums import WeekDay +from ._container_service_client_enums import WorkloadRuntime +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AbsoluteMonthlySchedule", + "AgentPool", + "AgentPoolAvailableVersions", + "AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem", + "AgentPoolDeleteMachinesParameter", + "AgentPoolListResult", + "AgentPoolNetworkProfile", + "AgentPoolSecurityProfile", + "AgentPoolUpgradeProfile", + "AgentPoolUpgradeProfilePropertiesUpgradesItem", + "AgentPoolUpgradeSettings", + "AgentPoolWindowsProfile", + "AzureKeyVaultKms", + "CloudErrorBody", + "ClusterUpgradeSettings", + "CompatibleVersions", + "ContainerServiceLinuxProfile", + "ContainerServiceNetworkProfile", + "ContainerServiceSshConfiguration", + "ContainerServiceSshPublicKey", + "CreationData", + "CredentialResult", + "CredentialResults", + "DailySchedule", + "DateSpan", + "DelegatedResource", + "EndpointDependency", + "EndpointDetail", + "ErrorAdditionalInfo", + "ErrorDetail", + "ErrorResponse", + "ExtendedLocation", + "IPTag", + "IstioCertificateAuthority", + "IstioComponents", + "IstioEgressGateway", + "IstioIngressGateway", + 
"IstioPluginCertificateAuthority", + "IstioServiceMesh", + "KubeletConfig", + "KubernetesPatchVersion", + "KubernetesVersion", + "KubernetesVersionCapabilities", + "KubernetesVersionListResult", + "LinuxOSConfig", + "Machine", + "MachineIpAddress", + "MachineListResult", + "MachineNetworkProperties", + "MachineProperties", + "MaintenanceConfiguration", + "MaintenanceConfigurationListResult", + "MaintenanceWindow", + "ManagedCluster", + "ManagedClusterAADProfile", + "ManagedClusterAPIServerAccessProfile", + "ManagedClusterAccessProfile", + "ManagedClusterAddonProfile", + "ManagedClusterAddonProfileIdentity", + "ManagedClusterAgentPoolProfile", + "ManagedClusterAgentPoolProfileProperties", + "ManagedClusterAutoUpgradeProfile", + "ManagedClusterAzureMonitorProfile", + "ManagedClusterAzureMonitorProfileKubeStateMetrics", + "ManagedClusterAzureMonitorProfileMetrics", + "ManagedClusterCostAnalysis", + "ManagedClusterHTTPProxyConfig", + "ManagedClusterIdentity", + "ManagedClusterIngressProfile", + "ManagedClusterIngressProfileWebAppRouting", + "ManagedClusterListResult", + "ManagedClusterLoadBalancerProfile", + "ManagedClusterLoadBalancerProfileManagedOutboundIPs", + "ManagedClusterLoadBalancerProfileOutboundIPPrefixes", + "ManagedClusterLoadBalancerProfileOutboundIPs", + "ManagedClusterManagedOutboundIPProfile", + "ManagedClusterMetricsProfile", + "ManagedClusterNATGatewayProfile", + "ManagedClusterOIDCIssuerProfile", + "ManagedClusterPodIdentity", + "ManagedClusterPodIdentityException", + "ManagedClusterPodIdentityProfile", + "ManagedClusterPodIdentityProvisioningError", + "ManagedClusterPodIdentityProvisioningErrorBody", + "ManagedClusterPodIdentityProvisioningInfo", + "ManagedClusterPoolUpgradeProfile", + "ManagedClusterPoolUpgradeProfileUpgradesItem", + "ManagedClusterPropertiesAutoScalerProfile", + "ManagedClusterSKU", + "ManagedClusterSecurityProfile", + "ManagedClusterSecurityProfileDefender", + "ManagedClusterSecurityProfileDefenderSecurityMonitoring", + 
"ManagedClusterSecurityProfileImageCleaner", + "ManagedClusterSecurityProfileWorkloadIdentity", + "ManagedClusterServicePrincipalProfile", + "ManagedClusterStorageProfile", + "ManagedClusterStorageProfileBlobCSIDriver", + "ManagedClusterStorageProfileDiskCSIDriver", + "ManagedClusterStorageProfileFileCSIDriver", + "ManagedClusterStorageProfileSnapshotController", + "ManagedClusterUpgradeProfile", + "ManagedClusterWindowsProfile", + "ManagedClusterWorkloadAutoScalerProfile", + "ManagedClusterWorkloadAutoScalerProfileKeda", + "ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler", + "ManagedServiceIdentityUserAssignedIdentitiesValue", + "MeshRevision", + "MeshRevisionProfile", + "MeshRevisionProfileList", + "MeshRevisionProfileProperties", + "MeshUpgradeProfile", + "MeshUpgradeProfileList", + "MeshUpgradeProfileProperties", + "OperationListResult", + "OperationValue", + "OutboundEnvironmentEndpoint", + "OutboundEnvironmentEndpointCollection", + "PortRange", + "PowerState", + "PrivateEndpoint", + "PrivateEndpointConnection", + "PrivateEndpointConnectionListResult", + "PrivateLinkResource", + "PrivateLinkResourcesListResult", + "PrivateLinkServiceConnectionState", + "ProxyResource", + "RelativeMonthlySchedule", + "Resource", + "ResourceReference", + "RunCommandRequest", + "RunCommandResult", + "Schedule", + "ServiceMeshProfile", + "Snapshot", + "SnapshotListResult", + "SubResource", + "SysctlConfig", + "SystemData", + "TagsObject", + "TimeInWeek", + "TimeSpan", + "TrackedResource", + "TrustedAccessRole", + "TrustedAccessRoleBinding", + "TrustedAccessRoleBindingListResult", + "TrustedAccessRoleListResult", + "TrustedAccessRoleRule", + "UpgradeOverrideSettings", + "UserAssignedIdentity", + "WeeklySchedule", + "WindowsGmsaProfile", + "AgentPoolMode", + "AgentPoolType", + "BackendPoolType", + "Code", + "ConnectionStatus", + "CreatedByType", + "Expander", + "ExtendedLocationTypes", + "Format", + "GPUInstanceProfile", + "IpFamily", + "IstioIngressGatewayMode", + 
"KeyVaultNetworkAccessTypes", + "KubeletDiskType", + "KubernetesSupportPlan", + "LicenseType", + "LoadBalancerSku", + "ManagedClusterPodIdentityProvisioningState", + "ManagedClusterSKUName", + "ManagedClusterSKUTier", + "NetworkDataplane", + "NetworkMode", + "NetworkPlugin", + "NetworkPluginMode", + "NetworkPolicy", + "NodeOSUpgradeChannel", + "OSDiskType", + "OSSKU", + "OSType", + "OutboundType", + "PrivateEndpointConnectionProvisioningState", + "Protocol", + "PublicNetworkAccess", + "ResourceIdentityType", + "ScaleDownMode", + "ScaleSetEvictionPolicy", + "ScaleSetPriority", + "ServiceMeshMode", + "SnapshotType", + "TrustedAccessRoleBindingProvisioningState", + "Type", + "UpgradeChannel", + "WeekDay", + "WorkloadRuntime", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_container_service_client_enums.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_container_service_client_enums.py new file mode 100644 index 00000000000..80f31aa50fb --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_container_service_client_enums.py @@ -0,0 +1,563 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AgentPoolMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """A cluster must have at least one 'System' Agent Pool at all times. For additional information + on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. + """ + + SYSTEM = "System" + """System agent pools are primarily for hosting critical system pods such as CoreDNS and + metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at + least 2vCPUs and 4GB of memory.""" + USER = "User" + """User agent pools are primarily for hosting your application pods.""" + + +class AgentPoolType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of Agent Pool.""" + + VIRTUAL_MACHINE_SCALE_SETS = "VirtualMachineScaleSets" + """Create an Agent Pool backed by a Virtual Machine Scale Set.""" + AVAILABILITY_SET = "AvailabilitySet" + """Use of this is strongly discouraged.""" + + +class BackendPoolType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of the managed inbound Load Balancer BackendPool.""" + + NODE_IP_CONFIGURATION = "NodeIPConfiguration" + """The type of the managed inbound Load Balancer BackendPool. + https://cloud-provider-azure.sigs.k8s.io/topics/loadbalancer/#configure-load-balancer-backend.""" + NODE_IP = "NodeIP" + """The type of the managed inbound Load Balancer BackendPool. 
+ https://cloud-provider-azure.sigs.k8s.io/topics/loadbalancer/#configure-load-balancer-backend.""" + + +class Code(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Tells whether the cluster is Running or Stopped.""" + + RUNNING = "Running" + """The cluster is running.""" + STOPPED = "Stopped" + """The cluster is stopped.""" + + +class ConnectionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The private link service connection status.""" + + PENDING = "Pending" + APPROVED = "Approved" + REJECTED = "Rejected" + DISCONNECTED = "Disconnected" + + +class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of identity that created the resource.""" + + USER = "User" + APPLICATION = "Application" + MANAGED_IDENTITY = "ManagedIdentity" + KEY = "Key" + + +class Expander(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """If not specified, the default is 'random'. See `expanders + `_ + for more information. + """ + + LEAST_WASTE = "least-waste" + """Selects the node group that will have the least idle CPU (if tied, unused memory) after + scale-up. This is useful when you have different classes of nodes, for example, high CPU or + high memory nodes, and only want to expand those when there are pending pods that need a lot of + those resources.""" + MOST_PODS = "most-pods" + """Selects the node group that would be able to schedule the most pods when scaling up. This is + useful when you are using nodeSelector to make sure certain pods land on certain nodes. Note + that this won't cause the autoscaler to select bigger nodes vs. smaller, as it can add multiple + smaller nodes at once.""" + PRIORITY = "priority" + """Selects the node group that has the highest priority assigned by the user. 
It's configuration + is described in more details `here + `_.""" + RANDOM = "random" + """Used when you don't have a particular need for the node groups to scale differently.""" + + +class ExtendedLocationTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of extendedLocation.""" + + EDGE_ZONE = "EdgeZone" + + +class Format(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Format.""" + + AZURE = "azure" + """Return azure auth-provider kubeconfig. This format is deprecated in v1.22 and will be fully + removed in v1.26. See: https://aka.ms/k8s/changes-1-26.""" + EXEC = "exec" + """Return exec format kubeconfig. This format requires kubelogin binary in the path.""" + EXEC_ENUM = "exec" + """Return exec format kubeconfig. This format requires kubelogin binary in the path.""" + + +class GPUInstanceProfile(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.""" + + MIG1_G = "MIG1g" + MIG2_G = "MIG2g" + MIG3_G = "MIG3g" + MIG4_G = "MIG4g" + MIG7_G = "MIG7g" + + +class IpFamily(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The IP version to use for cluster networking and IP assignment.""" + + I_PV4 = "IPv4" + I_PV6 = "IPv6" + + +class IstioIngressGatewayMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Mode of an ingress gateway.""" + + EXTERNAL = "External" + """The ingress gateway is assigned a public IP address and is publicly accessible.""" + INTERNAL = "Internal" + """The ingress gateway is assigned an internal IP address and cannot is accessed publicly.""" + + +class KeyVaultNetworkAccessTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Network access of key vault. The possible values are ``Public`` and ``Private``. ``Public`` + means the key vault allows public access from all networks. ``Private`` means the key vault + disables public access and enables private link. The default value is ``Public``. 
+ """ + + PUBLIC = "Public" + PRIVATE = "Private" + + +class KubeletDiskType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Determines the placement of emptyDir volumes, container runtime data root, and Kubelet + ephemeral storage. + """ + + OS = "OS" + """Kubelet will use the OS disk for its data.""" + TEMPORARY = "Temporary" + """Kubelet will use the temporary disk for its data.""" + + +class KubernetesSupportPlan(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Different support tiers for AKS managed clusters.""" + + KUBERNETES_OFFICIAL = "KubernetesOfficial" + """Support for the version is the same as for the open source Kubernetes offering. Official + Kubernetes open source community support versions for 1 year after release.""" + AKS_LONG_TERM_SUPPORT = "AKSLongTermSupport" + """Support for the version extended past the KubernetesOfficial support of 1 year. AKS continues + to patch CVEs for another 1 year, for a total of 2 years of support.""" + + +class LicenseType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The license type to use for Windows VMs. See `Azure Hybrid User Benefits + `_ for more details. + """ + + NONE = "None" + """No additional licensing is applied.""" + WINDOWS_SERVER = "Windows_Server" + """Enables Azure Hybrid User Benefits for Windows VMs.""" + + +class LoadBalancerSku(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The default is 'standard'. See `Azure Load Balancer SKUs + `_ for more information about the + differences between load balancer SKUs. + """ + + STANDARD = "standard" + """Use a a standard Load Balancer. This is the recommended Load Balancer SKU. 
For more information + about on working with the load balancer in the managed cluster, see the `standard Load Balancer + `_ article.""" + BASIC = "basic" + """Use a basic Load Balancer with limited functionality.""" + + +class ManagedClusterPodIdentityProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The current provisioning state of the pod identity.""" + + ASSIGNED = "Assigned" + CANCELED = "Canceled" + DELETING = "Deleting" + FAILED = "Failed" + SUCCEEDED = "Succeeded" + UPDATING = "Updating" + + +class ManagedClusterSKUName(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The name of a managed cluster SKU.""" + + BASE = "Base" + """Base option for the AKS control plane.""" + + +class ManagedClusterSKUTier(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """If not specified, the default is 'Free'. See `AKS Pricing Tier + `_ for more details. + """ + + PREMIUM = "Premium" + """Cluster has premium capabilities in addition to all of the capabilities included in 'Standard'. + Premium enables selection of LongTermSupport (aka.ms/aks/lts) for certain Kubernetes versions.""" + STANDARD = "Standard" + """Recommended for mission-critical and production workloads. Includes Kubernetes control plane + autoscaling, workload-intensive testing, and up to 5,000 nodes per cluster. Guarantees 99.95% + availability of the Kubernetes API server endpoint for clusters that use Availability Zones and + 99.9% of availability for clusters that don't use Availability Zones.""" + FREE = "Free" + """The cluster management is free, but charged for VM, storage, and networking usage. Best for + experimenting, learning, simple testing, or workloads with fewer than 10 nodes. Not recommended + for production use cases.""" + + +class NetworkDataplane(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Network dataplane used in the Kubernetes cluster.""" + + AZURE = "azure" + """Use Azure network dataplane.""" + CILIUM = "cilium" + """Use Cilium network dataplane. 
See `Azure CNI Powered by Cilium + `_ for more information.""" + + +class NetworkMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """This cannot be specified if networkPlugin is anything other than 'azure'.""" + + TRANSPARENT = "transparent" + """No bridge is created. Intra-VM Pod to Pod communication is through IP routes created by Azure + CNI. See `Transparent Mode `_ for + more information.""" + BRIDGE = "bridge" + """This is no longer supported""" + + +class NetworkPlugin(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Network plugin used for building the Kubernetes network.""" + + AZURE = "azure" + """Use the Azure CNI network plugin. See `Azure CNI (advanced) networking + `_ for + more information.""" + KUBENET = "kubenet" + """Use the Kubenet network plugin. See `Kubenet (basic) networking + `_ for more + information.""" + NONE = "none" + """No CNI plugin is pre-installed. See `BYO CNI + `_ for more information.""" + + +class NetworkPluginMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The mode the network plugin should use.""" + + OVERLAY = "overlay" + """Used with networkPlugin=azure, pods are given IPs from the PodCIDR address space but use Azure + Routing Domains rather than Kubenet's method of route tables. For more information visit + https://aka.ms/aks/azure-cni-overlay.""" + + +class NetworkPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Network policy used for building the Kubernetes network.""" + + NONE = "none" + """Network policies will not be enforced. This is the default value when NetworkPolicy is not + specified.""" + CALICO = "calico" + """Use Calico network policies. See `differences between Azure and Calico policies + `_ + for more information.""" + AZURE = "azure" + """Use Azure network policies. See `differences between Azure and Calico policies + `_ + for more information.""" + CILIUM = "cilium" + """Use Cilium to enforce network policies. 
This requires networkDataplane to be 'cilium'.""" + + +class NodeOSUpgradeChannel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Manner in which the OS on your nodes is updated. The default is NodeImage.""" + + NONE = "None" + """No attempt to update your machines OS will be made either by OS or by rolling VHDs. This means + you are responsible for your security updates""" + UNMANAGED = "Unmanaged" + """OS updates will be applied automatically through the OS built-in patching infrastructure. Newly + scaled in machines will be unpatched initially and will be patched at some point by the OS's + infrastructure. Behavior of this option depends on the OS in question. Ubuntu and Mariner apply + security patches through unattended upgrade roughly once a day around 06:00 UTC. Windows does + not apply security patches automatically and so for them this option is equivalent to None till + further notice""" + NODE_IMAGE = "NodeImage" + """AKS will update the nodes with a newly patched VHD containing security fixes and bugfixes on a + weekly cadence. With the VHD update machines will be rolling reimaged to that VHD following + maintenance windows and surge settings. No extra VHD cost is incurred when choosing this option + as AKS hosts the images.""" + SECURITY_PATCH = "SecurityPatch" + """AKS downloads and updates the nodes with tested security updates. These updates honor the + maintenance window settings and produce a new VHD that is used on new nodes. On some occasions + it's not possible to apply the updates in place, in such cases the existing nodes will also be + re-imaged to the newly produced VHD in order to apply the changes. This option incurs an extra + cost of hosting the new Security Patch VHDs in your resource group for just in time + consumption.""" + + +class OSDiskType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested + OSDiskSizeGB. 
Otherwise, defaults to 'Managed'. May not be changed after creation. For more + information see `Ephemeral OS + `_. + """ + + MANAGED = "Managed" + """Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data + loss should the VM need to be relocated to another host. Since containers aren't designed to + have local state persisted, this behavior offers limited value while providing some drawbacks, + including slower node provisioning and higher read/write latency.""" + EPHEMERAL = "Ephemeral" + """Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This + provides lower read/write latency, along with faster node scaling and cluster upgrades.""" + + +class OSSKU(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The + default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType + is Windows. + """ + + UBUNTU = "Ubuntu" + """Use Ubuntu as the OS for node images.""" + AZURE_LINUX = "AzureLinux" + """Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro + built by Microsoft, visit https://aka.ms/azurelinux for more information.""" + CBL_MARINER = "CBLMariner" + """Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.""" + WINDOWS2019 = "Windows2019" + """Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only + supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.""" + WINDOWS2022 = "Windows2022" + """Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only + supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.""" + + +class OSType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The operating system type. 
The default is Linux.""" + + LINUX = "Linux" + """Use Linux.""" + WINDOWS = "Windows" + """Use Windows.""" + + +class OutboundType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """This can only be set at cluster creation time and cannot be changed later. For more information + see `egress outbound type `_. + """ + + LOAD_BALANCER = "loadBalancer" + """The load balancer is used for egress through an AKS assigned public IP. This supports + Kubernetes services of type 'loadBalancer'. For more information see `outbound type + loadbalancer + `_.""" + USER_DEFINED_ROUTING = "userDefinedRouting" + """Egress paths must be defined by the user. This is an advanced scenario and requires proper + network configuration. For more information see `outbound type userDefinedRouting + `_.""" + MANAGED_NAT_GATEWAY = "managedNATGateway" + """The AKS-managed NAT gateway is used for egress.""" + USER_ASSIGNED_NAT_GATEWAY = "userAssignedNATGateway" + """The user-assigned NAT gateway associated to the cluster subnet is used for egress. This is an + advanced scenario and requires proper network configuration.""" + + +class PrivateEndpointConnectionProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The current provisioning state.""" + + CANCELED = "Canceled" + CREATING = "Creating" + DELETING = "Deleting" + FAILED = "Failed" + SUCCEEDED = "Succeeded" + + +class Protocol(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The network protocol of the port.""" + + TCP = "TCP" + """TCP protocol.""" + UDP = "UDP" + """UDP protocol.""" + + +class PublicNetworkAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Allow or deny public network access for AKS.""" + + ENABLED = "Enabled" + DISABLED = "Disabled" + + +class ResourceIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """For more information see `use managed identities in AKS + `_. 
+ """ + + SYSTEM_ASSIGNED = "SystemAssigned" + """Use an implicitly created system assigned managed identity to manage cluster resources. Master + components in the control plane such as kube-controller-manager will use the system assigned + managed identity to manipulate Azure resources.""" + USER_ASSIGNED = "UserAssigned" + """Use a user-specified identity to manage cluster resources. Master components in the control + plane such as kube-controller-manager will use the specified user assigned managed identity to + manipulate Azure resources.""" + NONE = "None" + """Do not use a managed identity for the Managed Cluster, service principal will be used instead.""" + + +class ScaleDownMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Describes how VMs are added to or removed from Agent Pools. See `billing states + `_. + """ + + DELETE = "Delete" + """Create new instances during scale up and remove instances during scale down.""" + DEALLOCATE = "Deallocate" + """Attempt to start deallocated instances (if they exist) during scale up and deallocate instances + during scale down.""" + + +class ScaleSetEvictionPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The eviction policy specifies what to do with the VM when it is evicted. The default is Delete. + For more information about eviction see `spot VMs + `_. + """ + + DELETE = "Delete" + """Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.""" + DEALLOCATE = "Deallocate" + """Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state + upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can + cause issues with cluster scaling or upgrading.""" + + +class ScaleSetPriority(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The Virtual Machine Scale Set priority.""" + + SPOT = "Spot" + """Spot priority VMs will be used. There is no SLA for spot nodes. 
See `spot on AKS + `_ for more information.""" + REGULAR = "Regular" + """Regular VMs will be used.""" + + +class ServiceMeshMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Mode of the service mesh.""" + + ISTIO = "Istio" + """Istio deployed as an AKS addon.""" + DISABLED = "Disabled" + """Mesh is disabled.""" + + +class SnapshotType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of a snapshot. The default is NodePool.""" + + NODE_POOL = "NodePool" + """The snapshot is a snapshot of a node pool.""" + + +class TrustedAccessRoleBindingProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The current provisioning state of trusted access role binding.""" + + CANCELED = "Canceled" + DELETING = "Deleting" + FAILED = "Failed" + SUCCEEDED = "Succeeded" + UPDATING = "Updating" + + +class Type(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies on which week of the month the dayOfWeek applies.""" + + FIRST = "First" + """First week of the month.""" + SECOND = "Second" + """Second week of the month.""" + THIRD = "Third" + """Third week of the month.""" + FOURTH = "Fourth" + """Fourth week of the month.""" + LAST = "Last" + """Last week of the month.""" + + +class UpgradeChannel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """For more information see `setting the AKS cluster auto-upgrade channel + `_. + """ + + RAPID = "rapid" + """Automatically upgrade the cluster to the latest supported patch release on the latest supported + minor version. In cases where the cluster is at a version of Kubernetes that is at an N-2 minor + version where N is the latest supported minor version, the cluster first upgrades to the latest + supported patch version on N-1 minor version. 
For example, if a cluster is running version + 1.17.7 and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster first is + upgraded to 1.18.6, then is upgraded to 1.19.1.""" + STABLE = "stable" + """Automatically upgrade the cluster to the latest supported patch release on minor version N-1, + where N is the latest supported minor version. For example, if a cluster is running version + 1.17.7 and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster is upgraded + to 1.18.6.""" + PATCH = "patch" + """Automatically upgrade the cluster to the latest supported patch version when it becomes + available while keeping the minor version the same. For example, if a cluster is running + version 1.17.7 and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster is + upgraded to 1.17.9.""" + NODE_IMAGE = "node-image" + """Automatically upgrade the node image to the latest version available. Consider using + nodeOSUpgradeChannel instead as that allows you to configure node OS patching separate from + Kubernetes version patching""" + NONE = "none" + """Disables auto-upgrades and keeps the cluster at its current version of Kubernetes.""" + + +class WeekDay(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The weekday enum.""" + + SUNDAY = "Sunday" + MONDAY = "Monday" + TUESDAY = "Tuesday" + WEDNESDAY = "Wednesday" + THURSDAY = "Thursday" + FRIDAY = "Friday" + SATURDAY = "Saturday" + + +class WorkloadRuntime(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Determines the type of workload a node can run.""" + + OCI_CONTAINER = "OCIContainer" + """Nodes will use Kubelet to run standard OCI container workloads.""" + WASM_WASI = "WasmWasi" + """Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).""" diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_models_py3.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_models_py3.py new file mode 100644 index 00000000000..3c3086ec9ba --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_models_py3.py @@ -0,0 +1,8231 @@ +# coding=utf-8 +# pylint: disable=too-many-lines +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union + +from ... import _serialization + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from .. import models as _models + + +class AbsoluteMonthlySchedule(_serialization.Model): + """For schedules like: 'recur every month on the 15th' or 'recur every 3 months on the 20th'. + + All required parameters must be populated in order to send to server. + + :ivar interval_months: Specifies the number of months between each set of occurrences. + Required. + :vartype interval_months: int + :ivar day_of_month: The date of the month. Required. 
+ :vartype day_of_month: int + """ + + _validation = { + "interval_months": {"required": True, "maximum": 6, "minimum": 1}, + "day_of_month": {"required": True, "maximum": 31, "minimum": 1}, + } + + _attribute_map = { + "interval_months": {"key": "intervalMonths", "type": "int"}, + "day_of_month": {"key": "dayOfMonth", "type": "int"}, + } + + def __init__(self, *, interval_months: int, day_of_month: int, **kwargs: Any) -> None: + """ + :keyword interval_months: Specifies the number of months between each set of occurrences. + Required. + :paramtype interval_months: int + :keyword day_of_month: The date of the month. Required. + :paramtype day_of_month: int + """ + super().__init__(**kwargs) + self.interval_months = interval_months + self.day_of_month = day_of_month + + +class SubResource(_serialization.Model): + """Reference to another subresource. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Resource ID. + :vartype id: str + :ivar name: The name of the resource that is unique within a resource group. This name can be + used to access the resource. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + + +class AgentPool(SubResource): # pylint: disable=too-many-instance-attributes + """Agent Pool. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Resource ID. + :vartype id: str + :ivar name: The name of the resource that is unique within a resource group. This name can be + used to access the resource. 
+ :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the + range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for + system pools. The default value is 1. + :vartype count: int + :ivar vm_size: VM size availability varies by region. If a node contains insufficient compute + resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted + VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :vartype vm_size: str + :ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine + in the master/agent pool. If you specify 0, it will apply the default osDisk size according to + the vmSize specified. + :vartype os_disk_size_gb: int + :ivar os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk + larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed + after creation. For more information see `Ephemeral OS + `_. Known values are: + "Managed" and "Ephemeral". + :vartype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :ivar kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data + root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". + :vartype kubelet_disk_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :ivar workload_runtime: Determines the type of workload a node can run. Known values are: + "OCIContainer" and "WasmWasi". + :vartype workload_runtime: str or + ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime + :ivar vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and used. + If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just + nodes. 
This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :vartype vnet_subnet_id: str + :ivar pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see + vnetSubnetID for more details). This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :vartype pod_subnet_id: str + :ivar max_pods: The maximum number of pods that can run on a node. + :vartype max_pods: int + :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and + "Windows". + :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is + Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", + "Windows2019", and "Windows2022". + :vartype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + :ivar max_count: The maximum number of nodes for auto-scaling. + :vartype max_count: int + :ivar min_count: The minimum number of nodes for auto-scaling. + :vartype min_count: int + :ivar enable_auto_scaling: Whether to enable auto-scaler. + :vartype enable_auto_scaling: bool + :ivar scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, it + defaults to Delete. Known values are: "Delete" and "Deallocate". + :vartype scale_down_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode + :ivar type_properties_type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets" + and "AvailabilitySet". 
+ :vartype type_properties_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType + :ivar mode: A cluster must have at least one 'System' Agent Pool at all times. For additional + information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". + :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode + :ivar orchestrator_version: Both patch version (e.g. 1.20.13) and + (e.g. 1.20) are supported. When is specified, the latest supported + GA patch version is chosen automatically. Updating the cluster with the same once + it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch + version is available. As a best practice, you should upgrade all node pools in an AKS cluster + to the same Kubernetes version. The node pool version must have the same major version as the + control plane. The node pool minor version must be within two minor versions of the control + plane version. The node pool version cannot be greater than the control plane version. For more + information see `upgrading a node pool + `_. + :vartype orchestrator_version: str + :ivar current_orchestrator_version: If orchestratorVersion is a fully specified version + , this field will be exactly equal to it. If orchestratorVersion is + , this field will contain the full version being used. + :vartype current_orchestrator_version: str + :ivar node_image_version: The version of node image. + :vartype node_image_version: str + :ivar upgrade_settings: Settings for upgrading the agentpool. + :vartype upgrade_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings + :ivar provisioning_state: The current deployment or provisioning state. + :vartype provisioning_state: str + :ivar power_state: When an Agent Pool is first created it is initially Running. 
The Agent Pool + can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and + does not accrue billing charges. An Agent Pool can only be stopped if it is Running and + provisioning state is Succeeded. + :vartype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :ivar availability_zones: The list of Availability zones to use for nodes. This can only be + specified if the AgentPoolType property is 'VirtualMachineScaleSets'. + :vartype availability_zones: list[str] + :ivar enable_node_public_ip: Some scenarios may require nodes in a node pool to receive their + own dedicated public IP addresses. A common scenario is for gaming workloads, where a console + needs to make a direct connection to a cloud virtual machine to minimize hops. For more + information see `assigning a public IP per node + `_. + The default is false. + :vartype enable_node_public_ip: bool + :ivar node_public_ip_prefix_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. # pylint: disable=line-too-long + :vartype node_public_ip_prefix_id: str + :ivar scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default + is 'Regular'. Known values are: "Spot" and "Regular". + :vartype scale_set_priority: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority + :ivar scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is + 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". + :vartype scale_set_eviction_policy: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy + :ivar spot_max_price: Possible values are any decimal value greater than zero or -1 which + indicates the willingness to pay any on-demand price. For more details on spot pricing, see + `spot VMs pricing `_. 
+ :vartype spot_max_price: float + :ivar tags: The tags to be persisted on the agent pool virtual machine scale set. + :vartype tags: dict[str, str] + :ivar node_labels: The node labels to be persisted across all nodes in agent pool. + :vartype node_labels: dict[str, str] + :ivar node_taints: The taints added to new nodes during node pool create and scale. For + example, key=value:NoSchedule. + :vartype node_taints: list[str] + :ivar proximity_placement_group_id: The ID for Proximity Placement Group. + :vartype proximity_placement_group_id: str + :ivar kubelet_config: The Kubelet configuration on the agent pool nodes. + :vartype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :ivar linux_os_config: The OS configuration of Linux agent nodes. + :vartype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig + :ivar enable_encryption_at_host: This is only supported on certain VM sizes and in certain + Azure regions. For more information, see: + https://docs.microsoft.com/azure/aks/enable-host-encryption. + :vartype enable_encryption_at_host: bool + :ivar enable_ultra_ssd: Whether to enable UltraSSD. + :vartype enable_ultra_ssd: bool + :ivar enable_fips: See `Add a FIPS-enabled node pool + `_ + for more details. + :vartype enable_fips: bool + :ivar gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile + for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g". + :vartype gpu_instance_profile: str or + ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :ivar creation_data: CreationData to be used to specify the source Snapshot ID if the node pool + will be created/upgraded using a snapshot. + :vartype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :ivar capacity_reservation_group_id: AKS will associate the specified agent pool with the + Capacity Reservation Group. 
+ :vartype capacity_reservation_group_id: str + :ivar host_group_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + For more information see `Azure dedicated hosts + `_. + :vartype host_group_id: str + :ivar network_profile: Network-related settings of an agent pool. + :vartype network_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :ivar windows_profile: The Windows agent pool's specific profile. + :vartype windows_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :ivar security_profile: The security settings of an agent pool. + :vartype security_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "os_disk_size_gb": {"maximum": 2048, "minimum": 0}, + "current_orchestrator_version": {"readonly": True}, + "node_image_version": {"readonly": True}, + "provisioning_state": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "count": {"key": "properties.count", "type": "int"}, + "vm_size": {"key": "properties.vmSize", "type": "str"}, + "os_disk_size_gb": {"key": "properties.osDiskSizeGB", "type": "int"}, + "os_disk_type": {"key": "properties.osDiskType", "type": "str"}, + "kubelet_disk_type": {"key": "properties.kubeletDiskType", "type": "str"}, + "workload_runtime": {"key": "properties.workloadRuntime", "type": "str"}, + "vnet_subnet_id": {"key": "properties.vnetSubnetID", "type": "str"}, + "pod_subnet_id": {"key": "properties.podSubnetID", "type": "str"}, + "max_pods": {"key": "properties.maxPods", "type": "int"}, + "os_type": {"key": "properties.osType", "type": "str"}, + "os_sku": {"key": 
"properties.osSKU", "type": "str"}, + "max_count": {"key": "properties.maxCount", "type": "int"}, + "min_count": {"key": "properties.minCount", "type": "int"}, + "enable_auto_scaling": {"key": "properties.enableAutoScaling", "type": "bool"}, + "scale_down_mode": {"key": "properties.scaleDownMode", "type": "str"}, + "type_properties_type": {"key": "properties.type", "type": "str"}, + "mode": {"key": "properties.mode", "type": "str"}, + "orchestrator_version": {"key": "properties.orchestratorVersion", "type": "str"}, + "current_orchestrator_version": {"key": "properties.currentOrchestratorVersion", "type": "str"}, + "node_image_version": {"key": "properties.nodeImageVersion", "type": "str"}, + "upgrade_settings": {"key": "properties.upgradeSettings", "type": "AgentPoolUpgradeSettings"}, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "power_state": {"key": "properties.powerState", "type": "PowerState"}, + "availability_zones": {"key": "properties.availabilityZones", "type": "[str]"}, + "enable_node_public_ip": {"key": "properties.enableNodePublicIP", "type": "bool"}, + "node_public_ip_prefix_id": {"key": "properties.nodePublicIPPrefixID", "type": "str"}, + "scale_set_priority": {"key": "properties.scaleSetPriority", "type": "str"}, + "scale_set_eviction_policy": {"key": "properties.scaleSetEvictionPolicy", "type": "str"}, + "spot_max_price": {"key": "properties.spotMaxPrice", "type": "float"}, + "tags": {"key": "properties.tags", "type": "{str}"}, + "node_labels": {"key": "properties.nodeLabels", "type": "{str}"}, + "node_taints": {"key": "properties.nodeTaints", "type": "[str]"}, + "proximity_placement_group_id": {"key": "properties.proximityPlacementGroupID", "type": "str"}, + "kubelet_config": {"key": "properties.kubeletConfig", "type": "KubeletConfig"}, + "linux_os_config": {"key": "properties.linuxOSConfig", "type": "LinuxOSConfig"}, + "enable_encryption_at_host": {"key": "properties.enableEncryptionAtHost", "type": "bool"}, + 
"enable_ultra_ssd": {"key": "properties.enableUltraSSD", "type": "bool"}, + "enable_fips": {"key": "properties.enableFIPS", "type": "bool"}, + "gpu_instance_profile": {"key": "properties.gpuInstanceProfile", "type": "str"}, + "creation_data": {"key": "properties.creationData", "type": "CreationData"}, + "capacity_reservation_group_id": {"key": "properties.capacityReservationGroupID", "type": "str"}, + "host_group_id": {"key": "properties.hostGroupID", "type": "str"}, + "network_profile": {"key": "properties.networkProfile", "type": "AgentPoolNetworkProfile"}, + "windows_profile": {"key": "properties.windowsProfile", "type": "AgentPoolWindowsProfile"}, + "security_profile": {"key": "properties.securityProfile", "type": "AgentPoolSecurityProfile"}, + } + + def __init__( # pylint: disable=too-many-locals + self, + *, + count: Optional[int] = None, + vm_size: Optional[str] = None, + os_disk_size_gb: Optional[int] = None, + os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None, + kubelet_disk_type: Optional[Union[str, "_models.KubeletDiskType"]] = None, + workload_runtime: Optional[Union[str, "_models.WorkloadRuntime"]] = None, + vnet_subnet_id: Optional[str] = None, + pod_subnet_id: Optional[str] = None, + max_pods: Optional[int] = None, + os_type: Union[str, "_models.OSType"] = "Linux", + os_sku: Optional[Union[str, "_models.OSSKU"]] = None, + max_count: Optional[int] = None, + min_count: Optional[int] = None, + enable_auto_scaling: Optional[bool] = None, + scale_down_mode: Optional[Union[str, "_models.ScaleDownMode"]] = None, + type_properties_type: Optional[Union[str, "_models.AgentPoolType"]] = None, + mode: Optional[Union[str, "_models.AgentPoolMode"]] = None, + orchestrator_version: Optional[str] = None, + upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None, + power_state: Optional["_models.PowerState"] = None, + availability_zones: Optional[List[str]] = None, + enable_node_public_ip: Optional[bool] = None, + node_public_ip_prefix_id: 
Optional[str] = None, + scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular", + scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete", + spot_max_price: float = -1, + tags: Optional[Dict[str, str]] = None, + node_labels: Optional[Dict[str, str]] = None, + node_taints: Optional[List[str]] = None, + proximity_placement_group_id: Optional[str] = None, + kubelet_config: Optional["_models.KubeletConfig"] = None, + linux_os_config: Optional["_models.LinuxOSConfig"] = None, + enable_encryption_at_host: Optional[bool] = None, + enable_ultra_ssd: Optional[bool] = None, + enable_fips: Optional[bool] = None, + gpu_instance_profile: Optional[Union[str, "_models.GPUInstanceProfile"]] = None, + creation_data: Optional["_models.CreationData"] = None, + capacity_reservation_group_id: Optional[str] = None, + host_group_id: Optional[str] = None, + network_profile: Optional["_models.AgentPoolNetworkProfile"] = None, + windows_profile: Optional["_models.AgentPoolWindowsProfile"] = None, + security_profile: Optional["_models.AgentPoolSecurityProfile"] = None, + **kwargs: Any + ) -> None: + """ + :keyword count: Number of agents (VMs) to host docker containers. Allowed values must be in the + range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for + system pools. The default value is 1. + :paramtype count: int + :keyword vm_size: VM size availability varies by region. If a node contains insufficient + compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on + restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :paramtype vm_size: str + :keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every + machine in the master/agent pool. If you specify 0, it will apply the default osDisk size + according to the vmSize specified. 
+ :paramtype os_disk_size_gb: int + :keyword os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk + larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed + after creation. For more information see `Ephemeral OS + `_. Known values are: + "Managed" and "Ephemeral". + :paramtype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :keyword kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime + data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". + :paramtype kubelet_disk_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :keyword workload_runtime: Determines the type of workload a node can run. Known values are: + "OCIContainer" and "WasmWasi". + :paramtype workload_runtime: str or + ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime + :keyword vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and + used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to + just nodes. This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :paramtype vnet_subnet_id: str + :keyword pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see + vnetSubnetID for more details). This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :paramtype pod_subnet_id: str + :keyword max_pods: The maximum number of pods that can run on a node. + :paramtype max_pods: int + :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" + and "Windows". 
+ :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :keyword os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType + is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", + "Windows2019", and "Windows2022". + :paramtype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + :keyword max_count: The maximum number of nodes for auto-scaling. + :paramtype max_count: int + :keyword min_count: The minimum number of nodes for auto-scaling. + :paramtype min_count: int + :keyword enable_auto_scaling: Whether to enable auto-scaler. + :paramtype enable_auto_scaling: bool + :keyword scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, + it defaults to Delete. Known values are: "Delete" and "Deallocate". + :paramtype scale_down_mode: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode + :keyword type_properties_type: The type of Agent Pool. Known values are: + "VirtualMachineScaleSets" and "AvailabilitySet". + :paramtype type_properties_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType + :keyword mode: A cluster must have at least one 'System' Agent Pool at all times. For + additional information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". + :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode + :keyword orchestrator_version: Both patch version (e.g. 1.20.13) and + (e.g. 1.20) are supported. When is specified, the latest supported + GA patch version is chosen automatically. Updating the cluster with the same once + it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch + version is available. 
As a best practice, you should upgrade all node pools in an AKS cluster + to the same Kubernetes version. The node pool version must have the same major version as the + control plane. The node pool minor version must be within two minor versions of the control + plane version. The node pool version cannot be greater than the control plane version. For more + information see `upgrading a node pool + `_. + :paramtype orchestrator_version: str + :keyword upgrade_settings: Settings for upgrading the agentpool. + :paramtype upgrade_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings + :keyword power_state: When an Agent Pool is first created it is initially Running. The Agent + Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs + and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and + provisioning state is Succeeded. + :paramtype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :keyword availability_zones: The list of Availability zones to use for nodes. This can only be + specified if the AgentPoolType property is 'VirtualMachineScaleSets'. + :paramtype availability_zones: list[str] + :keyword enable_node_public_ip: Some scenarios may require nodes in a node pool to receive + their own dedicated public IP addresses. A common scenario is for gaming workloads, where a + console needs to make a direct connection to a cloud virtual machine to minimize hops. For more + information see `assigning a public IP per node + `_. # pylint: disable=line-too-long + The default is false. + :paramtype enable_node_public_ip: bool + :keyword node_public_ip_prefix_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. 
# pylint: disable=line-too-long + :paramtype node_public_ip_prefix_id: str + :keyword scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the + default is 'Regular'. Known values are: "Spot" and "Regular". + :paramtype scale_set_priority: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority + :keyword scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is + 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". + :paramtype scale_set_eviction_policy: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy + :keyword spot_max_price: Possible values are any decimal value greater than zero or -1 which + indicates the willingness to pay any on-demand price. For more details on spot pricing, see + `spot VMs pricing `_. + :paramtype spot_max_price: float + :keyword tags: The tags to be persisted on the agent pool virtual machine scale set. + :paramtype tags: dict[str, str] + :keyword node_labels: The node labels to be persisted across all nodes in agent pool. + :paramtype node_labels: dict[str, str] + :keyword node_taints: The taints added to new nodes during node pool create and scale. For + example, key=value:NoSchedule. + :paramtype node_taints: list[str] + :keyword proximity_placement_group_id: The ID for Proximity Placement Group. + :paramtype proximity_placement_group_id: str + :keyword kubelet_config: The Kubelet configuration on the agent pool nodes. + :paramtype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :keyword linux_os_config: The OS configuration of Linux agent nodes. + :paramtype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig + :keyword enable_encryption_at_host: This is only supported on certain VM sizes and in certain + Azure regions. For more information, see: + https://docs.microsoft.com/azure/aks/enable-host-encryption. 
+ :paramtype enable_encryption_at_host: bool + :keyword enable_ultra_ssd: Whether to enable UltraSSD. + :paramtype enable_ultra_ssd: bool + :keyword enable_fips: See `Add a FIPS-enabled node pool + `_ + for more details. + :paramtype enable_fips: bool + :keyword gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance + profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and + "MIG7g". + :paramtype gpu_instance_profile: str or + ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :keyword creation_data: CreationData to be used to specify the source Snapshot ID if the node + pool will be created/upgraded using a snapshot. + :paramtype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :keyword capacity_reservation_group_id: AKS will associate the specified agent pool with the + Capacity Reservation Group. + :paramtype capacity_reservation_group_id: str + :keyword host_group_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + For more information see `Azure dedicated hosts + `_. + :paramtype host_group_id: str + :keyword network_profile: Network-related settings of an agent pool. + :paramtype network_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :keyword windows_profile: The Windows agent pool's specific profile. + :paramtype windows_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :keyword security_profile: The security settings of an agent pool. 
+ :paramtype security_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile + """ + super().__init__(**kwargs) + self.count = count + self.vm_size = vm_size + self.os_disk_size_gb = os_disk_size_gb + self.os_disk_type = os_disk_type + self.kubelet_disk_type = kubelet_disk_type + self.workload_runtime = workload_runtime + self.vnet_subnet_id = vnet_subnet_id + self.pod_subnet_id = pod_subnet_id + self.max_pods = max_pods + self.os_type = os_type + self.os_sku = os_sku + self.max_count = max_count + self.min_count = min_count + self.enable_auto_scaling = enable_auto_scaling + self.scale_down_mode = scale_down_mode + self.type_properties_type = type_properties_type + self.mode = mode + self.orchestrator_version = orchestrator_version + self.current_orchestrator_version = None + self.node_image_version = None + self.upgrade_settings = upgrade_settings + self.provisioning_state = None + self.power_state = power_state + self.availability_zones = availability_zones + self.enable_node_public_ip = enable_node_public_ip + self.node_public_ip_prefix_id = node_public_ip_prefix_id + self.scale_set_priority = scale_set_priority + self.scale_set_eviction_policy = scale_set_eviction_policy + self.spot_max_price = spot_max_price + self.tags = tags + self.node_labels = node_labels + self.node_taints = node_taints + self.proximity_placement_group_id = proximity_placement_group_id + self.kubelet_config = kubelet_config + self.linux_os_config = linux_os_config + self.enable_encryption_at_host = enable_encryption_at_host + self.enable_ultra_ssd = enable_ultra_ssd + self.enable_fips = enable_fips + self.gpu_instance_profile = gpu_instance_profile + self.creation_data = creation_data + self.capacity_reservation_group_id = capacity_reservation_group_id + self.host_group_id = host_group_id + self.network_profile = network_profile + self.windows_profile = windows_profile + self.security_profile = security_profile + + +class 
AgentPoolAvailableVersions(_serialization.Model): + """The list of available versions for an agent pool. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: The ID of the agent pool version list. + :vartype id: str + :ivar name: The name of the agent pool version list. + :vartype name: str + :ivar type: Type of the agent pool version list. + :vartype type: str + :ivar agent_pool_versions: List of versions available for agent pool. + :vartype agent_pool_versions: + list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem] + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "agent_pool_versions": { + "key": "properties.agentPoolVersions", + "type": "[AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem]", + }, + } + + def __init__( + self, + *, + agent_pool_versions: Optional[List["_models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword agent_pool_versions: List of versions available for agent pool. + :paramtype agent_pool_versions: + list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem] + """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.agent_pool_versions = agent_pool_versions + + +class AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem(_serialization.Model): # pylint: disable=name-too-long + """AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem. + + :ivar default: Whether this version is the default agent pool version. + :vartype default: bool + :ivar kubernetes_version: The Kubernetes version (major.minor.patch). 
+ :vartype kubernetes_version: str + :ivar is_preview: Whether Kubernetes version is currently in preview. + :vartype is_preview: bool + """ + + _attribute_map = { + "default": {"key": "default", "type": "bool"}, + "kubernetes_version": {"key": "kubernetesVersion", "type": "str"}, + "is_preview": {"key": "isPreview", "type": "bool"}, + } + + def __init__( + self, + *, + default: Optional[bool] = None, + kubernetes_version: Optional[str] = None, + is_preview: Optional[bool] = None, + **kwargs: Any + ) -> None: + """ + :keyword default: Whether this version is the default agent pool version. + :paramtype default: bool + :keyword kubernetes_version: The Kubernetes version (major.minor.patch). + :paramtype kubernetes_version: str + :keyword is_preview: Whether Kubernetes version is currently in preview. + :paramtype is_preview: bool + """ + super().__init__(**kwargs) + self.default = default + self.kubernetes_version = kubernetes_version + self.is_preview = is_preview + + +class AgentPoolDeleteMachinesParameter(_serialization.Model): + """Specifies a list of machine names from the agent pool to be deleted. + + All required parameters must be populated in order to send to server. + + :ivar machine_names: The agent pool machine names. Required. + :vartype machine_names: list[str] + """ + + _validation = { + "machine_names": {"required": True}, + } + + _attribute_map = { + "machine_names": {"key": "machineNames", "type": "[str]"}, + } + + def __init__(self, *, machine_names: List[str], **kwargs: Any) -> None: + """ + :keyword machine_names: The agent pool machine names. Required. + :paramtype machine_names: list[str] + """ + super().__init__(**kwargs) + self.machine_names = machine_names + + +class AgentPoolListResult(_serialization.Model): + """The response from the List Agent Pools operation. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: The list of agent pools. 
+ :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :ivar next_link: The URL to get the next set of agent pool results. + :vartype next_link: str + """ + + _validation = { + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[AgentPool]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: Optional[List["_models.AgentPool"]] = None, **kwargs: Any) -> None: + """ + :keyword value: The list of agent pools. + :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + """ + super().__init__(**kwargs) + self.value = value + self.next_link = None + + +class AgentPoolNetworkProfile(_serialization.Model): + """Network settings of an agent pool. + + :ivar node_public_ip_tags: IPTags of instance-level public IPs. + :vartype node_public_ip_tags: list[~azure.mgmt.containerservice.v2024_07_01.models.IPTag] + :ivar allowed_host_ports: The port ranges that are allowed to access. The specified ranges are + allowed to overlap. + :vartype allowed_host_ports: list[~azure.mgmt.containerservice.v2024_07_01.models.PortRange] + :ivar application_security_groups: The IDs of the application security groups which agent pool + will associate when created. + :vartype application_security_groups: list[str] + """ + + _attribute_map = { + "node_public_ip_tags": {"key": "nodePublicIPTags", "type": "[IPTag]"}, + "allowed_host_ports": {"key": "allowedHostPorts", "type": "[PortRange]"}, + "application_security_groups": {"key": "applicationSecurityGroups", "type": "[str]"}, + } + + def __init__( + self, + *, + node_public_ip_tags: Optional[List["_models.IPTag"]] = None, + allowed_host_ports: Optional[List["_models.PortRange"]] = None, + application_security_groups: Optional[List[str]] = None, + **kwargs: Any + ) -> None: + """ + :keyword node_public_ip_tags: IPTags of instance-level public IPs. 
+ :paramtype node_public_ip_tags: list[~azure.mgmt.containerservice.v2024_07_01.models.IPTag] + :keyword allowed_host_ports: The port ranges that are allowed to access. The specified ranges + are allowed to overlap. + :paramtype allowed_host_ports: list[~azure.mgmt.containerservice.v2024_07_01.models.PortRange] + :keyword application_security_groups: The IDs of the application security groups which agent + pool will associate when created. + :paramtype application_security_groups: list[str] + """ + super().__init__(**kwargs) + self.node_public_ip_tags = node_public_ip_tags + self.allowed_host_ports = allowed_host_ports + self.application_security_groups = application_security_groups + + +class AgentPoolSecurityProfile(_serialization.Model): + """The security settings of an agent pool. + + :ivar enable_vtpm: vTPM is a Trusted Launch feature for configuring a dedicated secure vault + for keys and measurements held locally on the node. For more details, see + aka.ms/aks/trustedlaunch. If not specified, the default is false. + :vartype enable_vtpm: bool + :ivar enable_secure_boot: Secure Boot is a feature of Trusted Launch which ensures that only + signed operating systems and drivers can boot. For more details, see aka.ms/aks/trustedlaunch. + If not specified, the default is false. + :vartype enable_secure_boot: bool + """ + + _attribute_map = { + "enable_vtpm": {"key": "enableVTPM", "type": "bool"}, + "enable_secure_boot": {"key": "enableSecureBoot", "type": "bool"}, + } + + def __init__( + self, *, enable_vtpm: Optional[bool] = None, enable_secure_boot: Optional[bool] = None, **kwargs: Any + ) -> None: + """ + :keyword enable_vtpm: vTPM is a Trusted Launch feature for configuring a dedicated secure vault + for keys and measurements held locally on the node. For more details, see + aka.ms/aks/trustedlaunch. If not specified, the default is false. 
+ :paramtype enable_vtpm: bool + :keyword enable_secure_boot: Secure Boot is a feature of Trusted Launch which ensures that only + signed operating systems and drivers can boot. For more details, see aka.ms/aks/trustedlaunch. + If not specified, the default is false. + :paramtype enable_secure_boot: bool + """ + super().__init__(**kwargs) + self.enable_vtpm = enable_vtpm + self.enable_secure_boot = enable_secure_boot + + +class AgentPoolUpgradeProfile(_serialization.Model): + """The list of available upgrades for an agent pool. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the agent pool upgrade profile. + :vartype id: str + :ivar name: The name of the agent pool upgrade profile. + :vartype name: str + :ivar type: The type of the agent pool upgrade profile. + :vartype type: str + :ivar kubernetes_version: The Kubernetes version (major.minor.patch). Required. + :vartype kubernetes_version: str + :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and + "Windows". + :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :ivar upgrades: List of orchestrator types and versions available for upgrade. + :vartype upgrades: + list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeProfilePropertiesUpgradesItem] + :ivar latest_node_image_version: The latest AKS supported node image version. 
+ :vartype latest_node_image_version: str + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "kubernetes_version": {"required": True}, + "os_type": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "kubernetes_version": {"key": "properties.kubernetesVersion", "type": "str"}, + "os_type": {"key": "properties.osType", "type": "str"}, + "upgrades": {"key": "properties.upgrades", "type": "[AgentPoolUpgradeProfilePropertiesUpgradesItem]"}, + "latest_node_image_version": {"key": "properties.latestNodeImageVersion", "type": "str"}, + } + + def __init__( + self, + *, + kubernetes_version: str, + os_type: Union[str, "_models.OSType"] = "Linux", + upgrades: Optional[List["_models.AgentPoolUpgradeProfilePropertiesUpgradesItem"]] = None, + latest_node_image_version: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword kubernetes_version: The Kubernetes version (major.minor.patch). Required. + :paramtype kubernetes_version: str + :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" + and "Windows". + :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :keyword upgrades: List of orchestrator types and versions available for upgrade. + :paramtype upgrades: + list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeProfilePropertiesUpgradesItem] + :keyword latest_node_image_version: The latest AKS supported node image version. 
+ :paramtype latest_node_image_version: str + """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.kubernetes_version = kubernetes_version + self.os_type = os_type + self.upgrades = upgrades + self.latest_node_image_version = latest_node_image_version + + +class AgentPoolUpgradeProfilePropertiesUpgradesItem(_serialization.Model): # pylint: disable=name-too-long + """AgentPoolUpgradeProfilePropertiesUpgradesItem. + + :ivar kubernetes_version: The Kubernetes version (major.minor.patch). + :vartype kubernetes_version: str + :ivar is_preview: Whether the Kubernetes version is currently in preview. + :vartype is_preview: bool + """ + + _attribute_map = { + "kubernetes_version": {"key": "kubernetesVersion", "type": "str"}, + "is_preview": {"key": "isPreview", "type": "bool"}, + } + + def __init__( + self, *, kubernetes_version: Optional[str] = None, is_preview: Optional[bool] = None, **kwargs: Any + ) -> None: + """ + :keyword kubernetes_version: The Kubernetes version (major.minor.patch). + :paramtype kubernetes_version: str + :keyword is_preview: Whether the Kubernetes version is currently in preview. + :paramtype is_preview: bool + """ + super().__init__(**kwargs) + self.kubernetes_version = kubernetes_version + self.is_preview = is_preview + + +class AgentPoolUpgradeSettings(_serialization.Model): + """Settings for upgrading an agentpool. + + :ivar max_surge: This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). + If a percentage is specified, it is the percentage of the total agent pool size at the time of + the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is + 1. For more information, including best practices, see: + https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade. + :vartype max_surge: str + :ivar drain_timeout_in_minutes: The amount of time (in minutes) to wait on eviction of pods and + graceful termination per node. 
This eviction wait time honors waiting on pod disruption + budgets. If this time is exceeded, the upgrade fails. If not specified, the default is 30 + minutes. + :vartype drain_timeout_in_minutes: int + :ivar node_soak_duration_in_minutes: The amount of time (in minutes) to wait after draining a + node and before reimaging it and moving on to next node. If not specified, the default is 0 + minutes. + :vartype node_soak_duration_in_minutes: int + """ + + _validation = { + "drain_timeout_in_minutes": {"maximum": 1440, "minimum": 1}, + "node_soak_duration_in_minutes": {"maximum": 30, "minimum": 0}, + } + + _attribute_map = { + "max_surge": {"key": "maxSurge", "type": "str"}, + "drain_timeout_in_minutes": {"key": "drainTimeoutInMinutes", "type": "int"}, + "node_soak_duration_in_minutes": {"key": "nodeSoakDurationInMinutes", "type": "int"}, + } + + def __init__( + self, + *, + max_surge: Optional[str] = None, + drain_timeout_in_minutes: Optional[int] = None, + node_soak_duration_in_minutes: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword max_surge: This can either be set to an integer (e.g. '5') or a percentage (e.g. + '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the + time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the + default is 1. For more information, including best practices, see: + https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade. + :paramtype max_surge: str + :keyword drain_timeout_in_minutes: The amount of time (in minutes) to wait on eviction of pods + and graceful termination per node. This eviction wait time honors waiting on pod disruption + budgets. If this time is exceeded, the upgrade fails. If not specified, the default is 30 + minutes. 
+ :paramtype drain_timeout_in_minutes: int + :keyword node_soak_duration_in_minutes: The amount of time (in minutes) to wait after draining + a node and before reimaging it and moving on to next node. If not specified, the default is 0 + minutes. + :paramtype node_soak_duration_in_minutes: int + """ + super().__init__(**kwargs) + self.max_surge = max_surge + self.drain_timeout_in_minutes = drain_timeout_in_minutes + self.node_soak_duration_in_minutes = node_soak_duration_in_minutes + + +class AgentPoolWindowsProfile(_serialization.Model): + """The Windows agent pool's specific profile. + + :ivar disable_outbound_nat: The default value is false. Outbound NAT can only be disabled if + the cluster outboundType is NAT Gateway and the Windows agent pool does not have node public IP + enabled. + :vartype disable_outbound_nat: bool + """ + + _attribute_map = { + "disable_outbound_nat": {"key": "disableOutboundNat", "type": "bool"}, + } + + def __init__(self, *, disable_outbound_nat: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword disable_outbound_nat: The default value is false. Outbound NAT can only be disabled if + the cluster outboundType is NAT Gateway and the Windows agent pool does not have node public IP + enabled. + :paramtype disable_outbound_nat: bool + """ + super().__init__(**kwargs) + self.disable_outbound_nat = disable_outbound_nat + + +class AzureKeyVaultKms(_serialization.Model): + """Azure Key Vault key management service settings for the security profile. + + :ivar enabled: Whether to enable Azure Key Vault key management service. The default is false. + :vartype enabled: bool + :ivar key_id: Identifier of Azure Key Vault key. See `key identifier format + `_ # pylint: disable=line-too-long + for more details. When Azure Key Vault key management service is enabled, this field is + required and must be a valid key identifier. When Azure Key Vault key management service is + disabled, leave the field empty. 
+ :vartype key_id: str + :ivar key_vault_network_access: Network access of key vault. The possible values are ``Public`` + and ``Private``. ``Public`` means the key vault allows public access from all networks. + ``Private`` means the key vault disables public access and enables private link. The default + value is ``Public``. Known values are: "Public" and "Private". + :vartype key_vault_network_access: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KeyVaultNetworkAccessTypes + :ivar key_vault_resource_id: Resource ID of key vault. When keyVaultNetworkAccess is + ``Private``\\ , this field is required and must be a valid resource ID. When + keyVaultNetworkAccess is ``Public``\\ , leave the field empty. + :vartype key_vault_resource_id: str + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + "key_id": {"key": "keyId", "type": "str"}, + "key_vault_network_access": {"key": "keyVaultNetworkAccess", "type": "str"}, + "key_vault_resource_id": {"key": "keyVaultResourceId", "type": "str"}, + } + + def __init__( + self, + *, + enabled: Optional[bool] = None, + key_id: Optional[str] = None, + key_vault_network_access: Union[str, "_models.KeyVaultNetworkAccessTypes"] = "Public", + key_vault_resource_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword enabled: Whether to enable Azure Key Vault key management service. The default is + false. + :paramtype enabled: bool + :keyword key_id: Identifier of Azure Key Vault key. See `key identifier format + `_ # pylint: disable=line-too-long + for more details. When Azure Key Vault key management service is enabled, this field is + required and must be a valid key identifier. When Azure Key Vault key management service is + disabled, leave the field empty. + :paramtype key_id: str + :keyword key_vault_network_access: Network access of key vault. The possible values are + ``Public`` and ``Private``. ``Public`` means the key vault allows public access from all + networks. 
``Private`` means the key vault disables public access and enables private link. The + default value is ``Public``. Known values are: "Public" and "Private". + :paramtype key_vault_network_access: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KeyVaultNetworkAccessTypes + :keyword key_vault_resource_id: Resource ID of key vault. When keyVaultNetworkAccess is + ``Private``\\ , this field is required and must be a valid resource ID. When + keyVaultNetworkAccess is ``Public``\\ , leave the field empty. + :paramtype key_vault_resource_id: str + """ + super().__init__(**kwargs) + self.enabled = enabled + self.key_id = key_id + self.key_vault_network_access = key_vault_network_access + self.key_vault_resource_id = key_vault_resource_id + + +class CloudErrorBody(_serialization.Model): + """An error response from the Container service. + + :ivar code: An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. + :vartype code: str + :ivar message: A message describing the error, intended to be suitable for display in a user + interface. + :vartype message: str + :ivar target: The target of the particular error. For example, the name of the property in + error. + :vartype target: str + :ivar details: A list of additional details about the error. + :vartype details: list[~azure.mgmt.containerservice.v2024_07_01.models.CloudErrorBody] + """ + + _attribute_map = { + "code": {"key": "code", "type": "str"}, + "message": {"key": "message", "type": "str"}, + "target": {"key": "target", "type": "str"}, + "details": {"key": "details", "type": "[CloudErrorBody]"}, + } + + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + target: Optional[str] = None, + details: Optional[List["_models.CloudErrorBody"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword code: An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. 
+ :paramtype code: str + :keyword message: A message describing the error, intended to be suitable for display in a user + interface. + :paramtype message: str + :keyword target: The target of the particular error. For example, the name of the property in + error. + :paramtype target: str + :keyword details: A list of additional details about the error. + :paramtype details: list[~azure.mgmt.containerservice.v2024_07_01.models.CloudErrorBody] + """ + super().__init__(**kwargs) + self.code = code + self.message = message + self.target = target + self.details = details + + +class ClusterUpgradeSettings(_serialization.Model): + """Settings for upgrading a cluster. + + :ivar override_settings: Settings for overrides. + :vartype override_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.UpgradeOverrideSettings + """ + + _attribute_map = { + "override_settings": {"key": "overrideSettings", "type": "UpgradeOverrideSettings"}, + } + + def __init__(self, *, override_settings: Optional["_models.UpgradeOverrideSettings"] = None, **kwargs: Any) -> None: + """ + :keyword override_settings: Settings for overrides. + :paramtype override_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.UpgradeOverrideSettings + """ + super().__init__(**kwargs) + self.override_settings = override_settings + + +class CompatibleVersions(_serialization.Model): + """Version information about a product/service that is compatible with a service mesh revision. + + :ivar name: The product/service name. + :vartype name: str + :ivar versions: Product/service versions compatible with a service mesh add-on revision. + :vartype versions: list[str] + """ + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "versions": {"key": "versions", "type": "[str]"}, + } + + def __init__(self, *, name: Optional[str] = None, versions: Optional[List[str]] = None, **kwargs: Any) -> None: + """ + :keyword name: The product/service name. 
+ :paramtype name: str + :keyword versions: Product/service versions compatible with a service mesh add-on revision. + :paramtype versions: list[str] + """ + super().__init__(**kwargs) + self.name = name + self.versions = versions + + +class ContainerServiceLinuxProfile(_serialization.Model): + """Profile for Linux VMs in the container service cluster. + + All required parameters must be populated in order to send to server. + + :ivar admin_username: The administrator username to use for Linux VMs. Required. + :vartype admin_username: str + :ivar ssh: The SSH configuration for Linux-based VMs running on Azure. Required. + :vartype ssh: ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceSshConfiguration + """ + + _validation = { + "admin_username": {"required": True, "pattern": r"^[A-Za-z][-A-Za-z0-9_]*$"}, + "ssh": {"required": True}, + } + + _attribute_map = { + "admin_username": {"key": "adminUsername", "type": "str"}, + "ssh": {"key": "ssh", "type": "ContainerServiceSshConfiguration"}, + } + + def __init__(self, *, admin_username: str, ssh: "_models.ContainerServiceSshConfiguration", **kwargs: Any) -> None: + """ + :keyword admin_username: The administrator username to use for Linux VMs. Required. + :paramtype admin_username: str + :keyword ssh: The SSH configuration for Linux-based VMs running on Azure. Required. + :paramtype ssh: + ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceSshConfiguration + """ + super().__init__(**kwargs) + self.admin_username = admin_username + self.ssh = ssh + + +class ContainerServiceNetworkProfile(_serialization.Model): # pylint: disable=too-many-instance-attributes + """Profile of network configuration. + + :ivar network_plugin: Network plugin used for building the Kubernetes network. Known values + are: "azure", "kubenet", and "none". + :vartype network_plugin: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPlugin + :ivar network_plugin_mode: The mode the network plugin should use. 
"overlay" + :vartype network_plugin_mode: str or + ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPluginMode + :ivar network_policy: Network policy used for building the Kubernetes network. Known values + are: "none", "calico", "azure", and "cilium". + :vartype network_policy: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPolicy + :ivar network_mode: This cannot be specified if networkPlugin is anything other than 'azure'. + Known values are: "transparent" and "bridge". + :vartype network_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkMode + :ivar network_dataplane: Network dataplane used in the Kubernetes cluster. Known values are: + "azure" and "cilium". + :vartype network_dataplane: str or + ~azure.mgmt.containerservice.v2024_07_01.models.NetworkDataplane + :ivar pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used. + :vartype pod_cidr: str + :ivar service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must + not overlap with any Subnet IP ranges. + :vartype service_cidr: str + :ivar dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within + the Kubernetes service address range specified in serviceCidr. + :vartype dns_service_ip: str + :ivar outbound_type: This can only be set at cluster creation time and cannot be changed later. + For more information see `egress outbound type + `_. Known values are: "loadBalancer", + "userDefinedRouting", "managedNATGateway", and "userAssignedNATGateway". + :vartype outbound_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OutboundType + :ivar load_balancer_sku: The default is 'standard'. See `Azure Load Balancer SKUs + `_ for more information about the + differences between load balancer SKUs. Known values are: "standard" and "basic". 
+ :vartype load_balancer_sku: str or + ~azure.mgmt.containerservice.v2024_07_01.models.LoadBalancerSku + :ivar load_balancer_profile: Profile of the cluster load balancer. + :vartype load_balancer_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfile + :ivar nat_gateway_profile: Profile of the cluster NAT gateway. + :vartype nat_gateway_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterNATGatewayProfile + :ivar pod_cidrs: One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for each + IP family (IPv4/IPv6), is expected for dual-stack networking. + :vartype pod_cidrs: list[str] + :ivar service_cidrs: One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for + each IP family (IPv4/IPv6), is expected for dual-stack networking. They must not overlap with + any Subnet IP ranges. + :vartype service_cidrs: list[str] + :ivar ip_families: IP families are used to determine single-stack or dual-stack clusters. For + single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and + IPv6. 
+ :vartype ip_families: list[str or ~azure.mgmt.containerservice.v2024_07_01.models.IpFamily] + """ + + _validation = { + "pod_cidr": {"pattern": r"^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$"}, + "service_cidr": {"pattern": r"^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$"}, + "dns_service_ip": { + "pattern": r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" + }, + } + + _attribute_map = { + "network_plugin": {"key": "networkPlugin", "type": "str"}, + "network_plugin_mode": {"key": "networkPluginMode", "type": "str"}, + "network_policy": {"key": "networkPolicy", "type": "str"}, + "network_mode": {"key": "networkMode", "type": "str"}, + "network_dataplane": {"key": "networkDataplane", "type": "str"}, + "pod_cidr": {"key": "podCidr", "type": "str"}, + "service_cidr": {"key": "serviceCidr", "type": "str"}, + "dns_service_ip": {"key": "dnsServiceIP", "type": "str"}, + "outbound_type": {"key": "outboundType", "type": "str"}, + "load_balancer_sku": {"key": "loadBalancerSku", "type": "str"}, + "load_balancer_profile": {"key": "loadBalancerProfile", "type": "ManagedClusterLoadBalancerProfile"}, + "nat_gateway_profile": {"key": "natGatewayProfile", "type": "ManagedClusterNATGatewayProfile"}, + "pod_cidrs": {"key": "podCidrs", "type": "[str]"}, + "service_cidrs": {"key": "serviceCidrs", "type": "[str]"}, + "ip_families": {"key": "ipFamilies", "type": "[str]"}, + } + + def __init__( + self, + *, + network_plugin: Union[str, "_models.NetworkPlugin"] = "kubenet", + network_plugin_mode: Optional[Union[str, "_models.NetworkPluginMode"]] = None, + network_policy: Optional[Union[str, "_models.NetworkPolicy"]] = None, + network_mode: Optional[Union[str, "_models.NetworkMode"]] = None, + network_dataplane: Optional[Union[str, "_models.NetworkDataplane"]] = None, + pod_cidr: str = "10.244.0.0/16", + service_cidr: str = "10.0.0.0/16", + dns_service_ip: str = "10.0.0.10", + outbound_type: Union[str, 
"_models.OutboundType"] = "loadBalancer", + load_balancer_sku: Optional[Union[str, "_models.LoadBalancerSku"]] = None, + load_balancer_profile: Optional["_models.ManagedClusterLoadBalancerProfile"] = None, + nat_gateway_profile: Optional["_models.ManagedClusterNATGatewayProfile"] = None, + pod_cidrs: Optional[List[str]] = None, + service_cidrs: Optional[List[str]] = None, + ip_families: Optional[List[Union[str, "_models.IpFamily"]]] = None, + **kwargs: Any + ) -> None: + """ + :keyword network_plugin: Network plugin used for building the Kubernetes network. Known values + are: "azure", "kubenet", and "none". + :paramtype network_plugin: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPlugin + :keyword network_plugin_mode: The mode the network plugin should use. "overlay" + :paramtype network_plugin_mode: str or + ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPluginMode + :keyword network_policy: Network policy used for building the Kubernetes network. Known values + are: "none", "calico", "azure", and "cilium". + :paramtype network_policy: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPolicy + :keyword network_mode: This cannot be specified if networkPlugin is anything other than + 'azure'. Known values are: "transparent" and "bridge". + :paramtype network_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkMode + :keyword network_dataplane: Network dataplane used in the Kubernetes cluster. Known values are: + "azure" and "cilium". + :paramtype network_dataplane: str or + ~azure.mgmt.containerservice.v2024_07_01.models.NetworkDataplane + :keyword pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used. + :paramtype pod_cidr: str + :keyword service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It + must not overlap with any Subnet IP ranges. + :paramtype service_cidr: str + :keyword dns_service_ip: An IP address assigned to the Kubernetes DNS service. 
It must be + within the Kubernetes service address range specified in serviceCidr. + :paramtype dns_service_ip: str + :keyword outbound_type: This can only be set at cluster creation time and cannot be changed + later. For more information see `egress outbound type + `_. Known values are: "loadBalancer", + "userDefinedRouting", "managedNATGateway", and "userAssignedNATGateway". + :paramtype outbound_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OutboundType + :keyword load_balancer_sku: The default is 'standard'. See `Azure Load Balancer SKUs + `_ for more information about the + differences between load balancer SKUs. Known values are: "standard" and "basic". + :paramtype load_balancer_sku: str or + ~azure.mgmt.containerservice.v2024_07_01.models.LoadBalancerSku + :keyword load_balancer_profile: Profile of the cluster load balancer. + :paramtype load_balancer_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfile + :keyword nat_gateway_profile: Profile of the cluster NAT gateway. + :paramtype nat_gateway_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterNATGatewayProfile + :keyword pod_cidrs: One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for + each IP family (IPv4/IPv6), is expected for dual-stack networking. + :paramtype pod_cidrs: list[str] + :keyword service_cidrs: One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one + for each IP family (IPv4/IPv6), is expected for dual-stack networking. They must not overlap + with any Subnet IP ranges. + :paramtype service_cidrs: list[str] + :keyword ip_families: IP families are used to determine single-stack or dual-stack clusters. + For single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and + IPv6. 
+ :paramtype ip_families: list[str or ~azure.mgmt.containerservice.v2024_07_01.models.IpFamily] + """ + super().__init__(**kwargs) + self.network_plugin = network_plugin + self.network_plugin_mode = network_plugin_mode + self.network_policy = network_policy + self.network_mode = network_mode + self.network_dataplane = network_dataplane + self.pod_cidr = pod_cidr + self.service_cidr = service_cidr + self.dns_service_ip = dns_service_ip + self.outbound_type = outbound_type + self.load_balancer_sku = load_balancer_sku + self.load_balancer_profile = load_balancer_profile + self.nat_gateway_profile = nat_gateway_profile + self.pod_cidrs = pod_cidrs + self.service_cidrs = service_cidrs + self.ip_families = ip_families + + +class ContainerServiceSshConfiguration(_serialization.Model): + """SSH configuration for Linux-based VMs running on Azure. + + All required parameters must be populated in order to send to server. + + :ivar public_keys: The list of SSH public keys used to authenticate with Linux-based VMs. A + maximum of 1 key may be specified. Required. + :vartype public_keys: + list[~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceSshPublicKey] + """ + + _validation = { + "public_keys": {"required": True}, + } + + _attribute_map = { + "public_keys": {"key": "publicKeys", "type": "[ContainerServiceSshPublicKey]"}, + } + + def __init__(self, *, public_keys: List["_models.ContainerServiceSshPublicKey"], **kwargs: Any) -> None: + """ + :keyword public_keys: The list of SSH public keys used to authenticate with Linux-based VMs. A + maximum of 1 key may be specified. Required. + :paramtype public_keys: + list[~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceSshPublicKey] + """ + super().__init__(**kwargs) + self.public_keys = public_keys + + +class ContainerServiceSshPublicKey(_serialization.Model): + """Contains information about SSH certificate public key data. + + All required parameters must be populated in order to send to server. 
+ + :ivar key_data: Certificate public key used to authenticate with VMs through SSH. The + certificate must be in PEM format with or without headers. Required. + :vartype key_data: str + """ + + _validation = { + "key_data": {"required": True}, + } + + _attribute_map = { + "key_data": {"key": "keyData", "type": "str"}, + } + + def __init__(self, *, key_data: str, **kwargs: Any) -> None: + """ + :keyword key_data: Certificate public key used to authenticate with VMs through SSH. The + certificate must be in PEM format with or without headers. Required. + :paramtype key_data: str + """ + super().__init__(**kwargs) + self.key_data = key_data + + +class CreationData(_serialization.Model): + """Data used when creating a target resource from a source resource. + + :ivar source_resource_id: This is the ARM ID of the source object to be used to create the + target object. + :vartype source_resource_id: str + """ + + _attribute_map = { + "source_resource_id": {"key": "sourceResourceId", "type": "str"}, + } + + def __init__(self, *, source_resource_id: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword source_resource_id: This is the ARM ID of the source object to be used to create the + target object. + :paramtype source_resource_id: str + """ + super().__init__(**kwargs) + self.source_resource_id = source_resource_id + + +class CredentialResult(_serialization.Model): + """The credential result response. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: The name of the credential. + :vartype name: str + :ivar value: Base64-encoded Kubernetes configuration file. 
+ :vartype value: bytes + """ + + _validation = { + "name": {"readonly": True}, + "value": {"readonly": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "value": {"key": "value", "type": "bytearray"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.name = None + self.value = None + + +class CredentialResults(_serialization.Model): + """The list credential result response. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar kubeconfigs: Base64-encoded Kubernetes configuration file. + :vartype kubeconfigs: list[~azure.mgmt.containerservice.v2024_07_01.models.CredentialResult] + """ + + _validation = { + "kubeconfigs": {"readonly": True}, + } + + _attribute_map = { + "kubeconfigs": {"key": "kubeconfigs", "type": "[CredentialResult]"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.kubeconfigs = None + + +class DailySchedule(_serialization.Model): + """For schedules like: 'recur every day' or 'recur every 3 days'. + + All required parameters must be populated in order to send to server. + + :ivar interval_days: Specifies the number of days between each set of occurrences. Required. + :vartype interval_days: int + """ + + _validation = { + "interval_days": {"required": True, "maximum": 7, "minimum": 1}, + } + + _attribute_map = { + "interval_days": {"key": "intervalDays", "type": "int"}, + } + + def __init__(self, *, interval_days: int, **kwargs: Any) -> None: + """ + :keyword interval_days: Specifies the number of days between each set of occurrences. Required. + :paramtype interval_days: int + """ + super().__init__(**kwargs) + self.interval_days = interval_days + + +class DateSpan(_serialization.Model): + """For example, between '2022-12-23' and '2023-01-05'. + + All required parameters must be populated in order to send to server. + + :ivar start: The start date of the date span. Required. 
+ :vartype start: ~datetime.date + :ivar end: The end date of the date span. Required. + :vartype end: ~datetime.date + """ + + _validation = { + "start": {"required": True}, + "end": {"required": True}, + } + + _attribute_map = { + "start": {"key": "start", "type": "date"}, + "end": {"key": "end", "type": "date"}, + } + + def __init__(self, *, start: datetime.date, end: datetime.date, **kwargs: Any) -> None: + """ + :keyword start: The start date of the date span. Required. + :paramtype start: ~datetime.date + :keyword end: The end date of the date span. Required. + :paramtype end: ~datetime.date + """ + super().__init__(**kwargs) + self.start = start + self.end = end + + +class DelegatedResource(_serialization.Model): + """Delegated resource properties - internal use only. + + :ivar resource_id: The ARM resource id of the delegated resource - internal use only. + :vartype resource_id: str + :ivar tenant_id: The tenant id of the delegated resource - internal use only. + :vartype tenant_id: str + :ivar referral_resource: The delegation id of the referral delegation (optional) - internal use + only. + :vartype referral_resource: str + :ivar location: The source resource location - internal use only. + :vartype location: str + """ + + _attribute_map = { + "resource_id": {"key": "resourceId", "type": "str"}, + "tenant_id": {"key": "tenantId", "type": "str"}, + "referral_resource": {"key": "referralResource", "type": "str"}, + "location": {"key": "location", "type": "str"}, + } + + def __init__( + self, + *, + resource_id: Optional[str] = None, + tenant_id: Optional[str] = None, + referral_resource: Optional[str] = None, + location: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword resource_id: The ARM resource id of the delegated resource - internal use only. + :paramtype resource_id: str + :keyword tenant_id: The tenant id of the delegated resource - internal use only. 
+ :paramtype tenant_id: str + :keyword referral_resource: The delegation id of the referral delegation (optional) - internal + use only. + :paramtype referral_resource: str + :keyword location: The source resource location - internal use only. + :paramtype location: str + """ + super().__init__(**kwargs) + self.resource_id = resource_id + self.tenant_id = tenant_id + self.referral_resource = referral_resource + self.location = location + + +class EndpointDependency(_serialization.Model): + """A domain name that AKS agent nodes are reaching at. + + :ivar domain_name: The domain name of the dependency. + :vartype domain_name: str + :ivar endpoint_details: The Ports and Protocols used when connecting to domainName. + :vartype endpoint_details: list[~azure.mgmt.containerservice.v2024_07_01.models.EndpointDetail] + """ + + _attribute_map = { + "domain_name": {"key": "domainName", "type": "str"}, + "endpoint_details": {"key": "endpointDetails", "type": "[EndpointDetail]"}, + } + + def __init__( + self, + *, + domain_name: Optional[str] = None, + endpoint_details: Optional[List["_models.EndpointDetail"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword domain_name: The domain name of the dependency. + :paramtype domain_name: str + :keyword endpoint_details: The Ports and Protocols used when connecting to domainName. + :paramtype endpoint_details: + list[~azure.mgmt.containerservice.v2024_07_01.models.EndpointDetail] + """ + super().__init__(**kwargs) + self.domain_name = domain_name + self.endpoint_details = endpoint_details + + +class EndpointDetail(_serialization.Model): + """connect information from the AKS agent nodes to a single endpoint. + + :ivar ip_address: An IP Address that Domain Name currently resolves to. + :vartype ip_address: str + :ivar port: The port an endpoint is connected to. + :vartype port: int + :ivar protocol: The protocol used for connection. + :vartype protocol: str + :ivar description: Description of the detail. 
+ :vartype description: str + """ + + _attribute_map = { + "ip_address": {"key": "ipAddress", "type": "str"}, + "port": {"key": "port", "type": "int"}, + "protocol": {"key": "protocol", "type": "str"}, + "description": {"key": "description", "type": "str"}, + } + + def __init__( + self, + *, + ip_address: Optional[str] = None, + port: Optional[int] = None, + protocol: Optional[str] = None, + description: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword ip_address: An IP Address that Domain Name currently resolves to. + :paramtype ip_address: str + :keyword port: The port an endpoint is connected to. + :paramtype port: int + :keyword protocol: The protocol used for connection. + :paramtype protocol: str + :keyword description: Description of the detail. + :paramtype description: str + """ + super().__init__(**kwargs) + self.ip_address = ip_address + self.port = port + self.protocol = protocol + self.description = description + + +class ErrorAdditionalInfo(_serialization.Model): + """The resource management error additional info. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar type: The additional info type. + :vartype type: str + :ivar info: The additional info. + :vartype info: JSON + """ + + _validation = { + "type": {"readonly": True}, + "info": {"readonly": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "info": {"key": "info", "type": "object"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.type = None + self.info = None + + +class ErrorDetail(_serialization.Model): + """The error detail. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar code: The error code. + :vartype code: str + :ivar message: The error message. + :vartype message: str + :ivar target: The error target. + :vartype target: str + :ivar details: The error details. 
+ :vartype details: list[~azure.mgmt.containerservice.v2024_07_01.models.ErrorDetail] + :ivar additional_info: The error additional info. + :vartype additional_info: + list[~azure.mgmt.containerservice.v2024_07_01.models.ErrorAdditionalInfo] + """ + + _validation = { + "code": {"readonly": True}, + "message": {"readonly": True}, + "target": {"readonly": True}, + "details": {"readonly": True}, + "additional_info": {"readonly": True}, + } + + _attribute_map = { + "code": {"key": "code", "type": "str"}, + "message": {"key": "message", "type": "str"}, + "target": {"key": "target", "type": "str"}, + "details": {"key": "details", "type": "[ErrorDetail]"}, + "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.code = None + self.message = None + self.target = None + self.details = None + self.additional_info = None + + +class ErrorResponse(_serialization.Model): + """Common error response for all Azure Resource Manager APIs to return error details for failed + operations. (This also follows the OData error response format.). + + :ivar error: The error object. + :vartype error: ~azure.mgmt.containerservice.v2024_07_01.models.ErrorDetail + """ + + _attribute_map = { + "error": {"key": "error", "type": "ErrorDetail"}, + } + + def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None: + """ + :keyword error: The error object. + :paramtype error: ~azure.mgmt.containerservice.v2024_07_01.models.ErrorDetail + """ + super().__init__(**kwargs) + self.error = error + + +class ExtendedLocation(_serialization.Model): + """The complex type of the extended location. + + :ivar name: The name of the extended location. + :vartype name: str + :ivar type: The type of the extended location. 
"EdgeZone" + :vartype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.ExtendedLocationTypes + """ + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + type: Optional[Union[str, "_models.ExtendedLocationTypes"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: The name of the extended location. + :paramtype name: str + :keyword type: The type of the extended location. "EdgeZone" + :paramtype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.ExtendedLocationTypes + """ + super().__init__(**kwargs) + self.name = name + self.type = type + + +class IPTag(_serialization.Model): + """Contains the IPTag associated with the object. + + :ivar ip_tag_type: The IP tag type. Example: RoutingPreference. + :vartype ip_tag_type: str + :ivar tag: The value of the IP tag associated with the public IP. Example: Internet. + :vartype tag: str + """ + + _attribute_map = { + "ip_tag_type": {"key": "ipTagType", "type": "str"}, + "tag": {"key": "tag", "type": "str"}, + } + + def __init__(self, *, ip_tag_type: Optional[str] = None, tag: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword ip_tag_type: The IP tag type. Example: RoutingPreference. + :paramtype ip_tag_type: str + :keyword tag: The value of the IP tag associated with the public IP. Example: Internet. + :paramtype tag: str + """ + super().__init__(**kwargs) + self.ip_tag_type = ip_tag_type + self.tag = tag + + +class IstioCertificateAuthority(_serialization.Model): + """Istio Service Mesh Certificate Authority (CA) configuration. For now, we only support plugin + certificates as described here https://aka.ms/asm-plugin-ca. + + :ivar plugin: Plugin certificates information for Service Mesh. 
+ :vartype plugin: + ~azure.mgmt.containerservice.v2024_07_01.models.IstioPluginCertificateAuthority + """ + + _attribute_map = { + "plugin": {"key": "plugin", "type": "IstioPluginCertificateAuthority"}, + } + + def __init__(self, *, plugin: Optional["_models.IstioPluginCertificateAuthority"] = None, **kwargs: Any) -> None: + """ + :keyword plugin: Plugin certificates information for Service Mesh. + :paramtype plugin: + ~azure.mgmt.containerservice.v2024_07_01.models.IstioPluginCertificateAuthority + """ + super().__init__(**kwargs) + self.plugin = plugin + + +class IstioComponents(_serialization.Model): + """Istio components configuration. + + :ivar ingress_gateways: Istio ingress gateways. + :vartype ingress_gateways: + list[~azure.mgmt.containerservice.v2024_07_01.models.IstioIngressGateway] + :ivar egress_gateways: Istio egress gateways. + :vartype egress_gateways: + list[~azure.mgmt.containerservice.v2024_07_01.models.IstioEgressGateway] + """ + + _attribute_map = { + "ingress_gateways": {"key": "ingressGateways", "type": "[IstioIngressGateway]"}, + "egress_gateways": {"key": "egressGateways", "type": "[IstioEgressGateway]"}, + } + + def __init__( + self, + *, + ingress_gateways: Optional[List["_models.IstioIngressGateway"]] = None, + egress_gateways: Optional[List["_models.IstioEgressGateway"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword ingress_gateways: Istio ingress gateways. + :paramtype ingress_gateways: + list[~azure.mgmt.containerservice.v2024_07_01.models.IstioIngressGateway] + :keyword egress_gateways: Istio egress gateways. + :paramtype egress_gateways: + list[~azure.mgmt.containerservice.v2024_07_01.models.IstioEgressGateway] + """ + super().__init__(**kwargs) + self.ingress_gateways = ingress_gateways + self.egress_gateways = egress_gateways + + +class IstioEgressGateway(_serialization.Model): + """Istio egress gateway configuration. + + All required parameters must be populated in order to send to server. 
+ + :ivar enabled: Whether to enable the egress gateway. Required. + :vartype enabled: bool + """ + + _validation = { + "enabled": {"required": True}, + } + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: bool, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable the egress gateway. Required. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class IstioIngressGateway(_serialization.Model): + """Istio ingress gateway configuration. For now, we support up to one external ingress gateway + named ``aks-istio-ingressgateway-external`` and one internal ingress gateway named + ``aks-istio-ingressgateway-internal``. + + All required parameters must be populated in order to send to server. + + :ivar mode: Mode of an ingress gateway. Required. Known values are: "External" and "Internal". + :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.IstioIngressGatewayMode + :ivar enabled: Whether to enable the ingress gateway. Required. + :vartype enabled: bool + """ + + _validation = { + "mode": {"required": True}, + "enabled": {"required": True}, + } + + _attribute_map = { + "mode": {"key": "mode", "type": "str"}, + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, mode: Union[str, "_models.IstioIngressGatewayMode"], enabled: bool, **kwargs: Any) -> None: + """ + :keyword mode: Mode of an ingress gateway. Required. Known values are: "External" and + "Internal". + :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.IstioIngressGatewayMode + :keyword enabled: Whether to enable the ingress gateway. Required. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.mode = mode + self.enabled = enabled + + +class IstioPluginCertificateAuthority(_serialization.Model): + """Plugin certificates information for Service Mesh. + + :ivar key_vault_id: The resource ID of the Key Vault. 
+ :vartype key_vault_id: str + :ivar cert_object_name: Intermediate certificate object name in Azure Key Vault. + :vartype cert_object_name: str + :ivar key_object_name: Intermediate certificate private key object name in Azure Key Vault. + :vartype key_object_name: str + :ivar root_cert_object_name: Root certificate object name in Azure Key Vault. + :vartype root_cert_object_name: str + :ivar cert_chain_object_name: Certificate chain object name in Azure Key Vault. + :vartype cert_chain_object_name: str + """ + + _attribute_map = { + "key_vault_id": {"key": "keyVaultId", "type": "str"}, + "cert_object_name": {"key": "certObjectName", "type": "str"}, + "key_object_name": {"key": "keyObjectName", "type": "str"}, + "root_cert_object_name": {"key": "rootCertObjectName", "type": "str"}, + "cert_chain_object_name": {"key": "certChainObjectName", "type": "str"}, + } + + def __init__( + self, + *, + key_vault_id: Optional[str] = None, + cert_object_name: Optional[str] = None, + key_object_name: Optional[str] = None, + root_cert_object_name: Optional[str] = None, + cert_chain_object_name: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword key_vault_id: The resource ID of the Key Vault. + :paramtype key_vault_id: str + :keyword cert_object_name: Intermediate certificate object name in Azure Key Vault. + :paramtype cert_object_name: str + :keyword key_object_name: Intermediate certificate private key object name in Azure Key Vault. + :paramtype key_object_name: str + :keyword root_cert_object_name: Root certificate object name in Azure Key Vault. + :paramtype root_cert_object_name: str + :keyword cert_chain_object_name: Certificate chain object name in Azure Key Vault. 
+ :paramtype cert_chain_object_name: str + """ + super().__init__(**kwargs) + self.key_vault_id = key_vault_id + self.cert_object_name = cert_object_name + self.key_object_name = key_object_name + self.root_cert_object_name = root_cert_object_name + self.cert_chain_object_name = cert_chain_object_name + + +class IstioServiceMesh(_serialization.Model): + """Istio service mesh configuration. + + :ivar components: Istio components configuration. + :vartype components: ~azure.mgmt.containerservice.v2024_07_01.models.IstioComponents + :ivar certificate_authority: Istio Service Mesh Certificate Authority (CA) configuration. For + now, we only support plugin certificates as described here https://aka.ms/asm-plugin-ca. + :vartype certificate_authority: + ~azure.mgmt.containerservice.v2024_07_01.models.IstioCertificateAuthority + :ivar revisions: The list of revisions of the Istio control plane. When an upgrade is not in + progress, this holds one value. When canary upgrade is in progress, this can only hold two + consecutive values. For more information, see: + https://learn.microsoft.com/en-us/azure/aks/istio-upgrade. + :vartype revisions: list[str] + """ + + _validation = { + "revisions": {"max_items": 2, "min_items": 0, "unique": True}, + } + + _attribute_map = { + "components": {"key": "components", "type": "IstioComponents"}, + "certificate_authority": {"key": "certificateAuthority", "type": "IstioCertificateAuthority"}, + "revisions": {"key": "revisions", "type": "[str]"}, + } + + def __init__( + self, + *, + components: Optional["_models.IstioComponents"] = None, + certificate_authority: Optional["_models.IstioCertificateAuthority"] = None, + revisions: Optional[List[str]] = None, + **kwargs: Any + ) -> None: + """ + :keyword components: Istio components configuration. + :paramtype components: ~azure.mgmt.containerservice.v2024_07_01.models.IstioComponents + :keyword certificate_authority: Istio Service Mesh Certificate Authority (CA) configuration. 
+ For now, we only support plugin certificates as described here https://aka.ms/asm-plugin-ca. + :paramtype certificate_authority: + ~azure.mgmt.containerservice.v2024_07_01.models.IstioCertificateAuthority + :keyword revisions: The list of revisions of the Istio control plane. When an upgrade is not in + progress, this holds one value. When canary upgrade is in progress, this can only hold two + consecutive values. For more information, see: + https://learn.microsoft.com/en-us/azure/aks/istio-upgrade. + :paramtype revisions: list[str] + """ + super().__init__(**kwargs) + self.components = components + self.certificate_authority = certificate_authority + self.revisions = revisions + + +class KubeletConfig(_serialization.Model): # pylint: disable=too-many-instance-attributes + """See `AKS custom node configuration + `_ for more details. + + :ivar cpu_manager_policy: The default is 'none'. See `Kubernetes CPU management policies + `_ + for more information. Allowed values are 'none' and 'static'. + :vartype cpu_manager_policy: str + :ivar cpu_cfs_quota: The default is true. + :vartype cpu_cfs_quota: bool + :ivar cpu_cfs_quota_period: The default is '100ms.' Valid values are a sequence of decimal + numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported + units are 'ns', 'us', 'ms', 's', 'm', and 'h'. + :vartype cpu_cfs_quota_period: str + :ivar image_gc_high_threshold: To disable image garbage collection, set to 100. The default is + 85%. + :vartype image_gc_high_threshold: int + :ivar image_gc_low_threshold: This cannot be set higher than imageGcHighThreshold. The default + is 80%. + :vartype image_gc_low_threshold: int + :ivar topology_manager_policy: For more information see `Kubernetes Topology Manager + `_. The default is + 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'. 
+ :vartype topology_manager_policy: str + :ivar allowed_unsafe_sysctls: Allowed list of unsafe sysctls or unsafe sysctl patterns (ending + in ``*``\\ ). + :vartype allowed_unsafe_sysctls: list[str] + :ivar fail_swap_on: If set to true it will make the Kubelet fail to start if swap is enabled on + the node. + :vartype fail_swap_on: bool + :ivar container_log_max_size_mb: The maximum size (e.g. 10Mi) of container log file before it + is rotated. + :vartype container_log_max_size_mb: int + :ivar container_log_max_files: The maximum number of container log files that can be present + for a container. The number must be ≥ 2. + :vartype container_log_max_files: int + :ivar pod_max_pids: The maximum number of processes per pod. + :vartype pod_max_pids: int + """ + + _validation = { + "container_log_max_files": {"minimum": 2}, + } + + _attribute_map = { + "cpu_manager_policy": {"key": "cpuManagerPolicy", "type": "str"}, + "cpu_cfs_quota": {"key": "cpuCfsQuota", "type": "bool"}, + "cpu_cfs_quota_period": {"key": "cpuCfsQuotaPeriod", "type": "str"}, + "image_gc_high_threshold": {"key": "imageGcHighThreshold", "type": "int"}, + "image_gc_low_threshold": {"key": "imageGcLowThreshold", "type": "int"}, + "topology_manager_policy": {"key": "topologyManagerPolicy", "type": "str"}, + "allowed_unsafe_sysctls": {"key": "allowedUnsafeSysctls", "type": "[str]"}, + "fail_swap_on": {"key": "failSwapOn", "type": "bool"}, + "container_log_max_size_mb": {"key": "containerLogMaxSizeMB", "type": "int"}, + "container_log_max_files": {"key": "containerLogMaxFiles", "type": "int"}, + "pod_max_pids": {"key": "podMaxPids", "type": "int"}, + } + + def __init__( + self, + *, + cpu_manager_policy: Optional[str] = None, + cpu_cfs_quota: Optional[bool] = None, + cpu_cfs_quota_period: Optional[str] = None, + image_gc_high_threshold: Optional[int] = None, + image_gc_low_threshold: Optional[int] = None, + topology_manager_policy: Optional[str] = None, + allowed_unsafe_sysctls: Optional[List[str]] = None, 
+ fail_swap_on: Optional[bool] = None, + container_log_max_size_mb: Optional[int] = None, + container_log_max_files: Optional[int] = None, + pod_max_pids: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword cpu_manager_policy: The default is 'none'. See `Kubernetes CPU management policies + `_ + for more information. Allowed values are 'none' and 'static'. + :paramtype cpu_manager_policy: str + :keyword cpu_cfs_quota: The default is true. + :paramtype cpu_cfs_quota: bool + :keyword cpu_cfs_quota_period: The default is '100ms.' Valid values are a sequence of decimal + numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported + units are 'ns', 'us', 'ms', 's', 'm', and 'h'. + :paramtype cpu_cfs_quota_period: str + :keyword image_gc_high_threshold: To disable image garbage collection, set to 100. The default + is 85%. + :paramtype image_gc_high_threshold: int + :keyword image_gc_low_threshold: This cannot be set higher than imageGcHighThreshold. The + default is 80%. + :paramtype image_gc_low_threshold: int + :keyword topology_manager_policy: For more information see `Kubernetes Topology Manager + `_. The default is + 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'. + :paramtype topology_manager_policy: str + :keyword allowed_unsafe_sysctls: Allowed list of unsafe sysctls or unsafe sysctl patterns + (ending in ``*``\\ ). + :paramtype allowed_unsafe_sysctls: list[str] + :keyword fail_swap_on: If set to true it will make the Kubelet fail to start if swap is enabled + on the node. + :paramtype fail_swap_on: bool + :keyword container_log_max_size_mb: The maximum size (e.g. 10Mi) of container log file before + it is rotated. + :paramtype container_log_max_size_mb: int + :keyword container_log_max_files: The maximum number of container log files that can be present + for a container. The number must be ≥ 2. 
+ :paramtype container_log_max_files: int + :keyword pod_max_pids: The maximum number of processes per pod. + :paramtype pod_max_pids: int + """ + super().__init__(**kwargs) + self.cpu_manager_policy = cpu_manager_policy + self.cpu_cfs_quota = cpu_cfs_quota + self.cpu_cfs_quota_period = cpu_cfs_quota_period + self.image_gc_high_threshold = image_gc_high_threshold + self.image_gc_low_threshold = image_gc_low_threshold + self.topology_manager_policy = topology_manager_policy + self.allowed_unsafe_sysctls = allowed_unsafe_sysctls + self.fail_swap_on = fail_swap_on + self.container_log_max_size_mb = container_log_max_size_mb + self.container_log_max_files = container_log_max_files + self.pod_max_pids = pod_max_pids + + +class KubernetesPatchVersion(_serialization.Model): + """Kubernetes patch version profile. + + :ivar upgrades: Possible upgrade path for given patch version. + :vartype upgrades: list[str] + """ + + _attribute_map = { + "upgrades": {"key": "upgrades", "type": "[str]"}, + } + + def __init__(self, *, upgrades: Optional[List[str]] = None, **kwargs: Any) -> None: + """ + :keyword upgrades: Possible upgrade path for given patch version. + :paramtype upgrades: list[str] + """ + super().__init__(**kwargs) + self.upgrades = upgrades + + +class KubernetesVersion(_serialization.Model): + """Kubernetes version profile for given major.minor release. + + :ivar version: major.minor version of Kubernetes release. + :vartype version: str + :ivar capabilities: Capabilities on this Kubernetes version. + :vartype capabilities: + ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersionCapabilities + :ivar is_default: Whether this version is default. + :vartype is_default: bool + :ivar is_preview: Whether this version is in preview mode. + :vartype is_preview: bool + :ivar patch_versions: Patch versions of Kubernetes release. 
+ :vartype patch_versions: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesPatchVersion] + """ + + _attribute_map = { + "version": {"key": "version", "type": "str"}, + "capabilities": {"key": "capabilities", "type": "KubernetesVersionCapabilities"}, + "is_default": {"key": "isDefault", "type": "bool"}, + "is_preview": {"key": "isPreview", "type": "bool"}, + "patch_versions": {"key": "patchVersions", "type": "{KubernetesPatchVersion}"}, + } + + def __init__( + self, + *, + version: Optional[str] = None, + capabilities: Optional["_models.KubernetesVersionCapabilities"] = None, + is_default: Optional[bool] = None, + is_preview: Optional[bool] = None, + patch_versions: Optional[Dict[str, "_models.KubernetesPatchVersion"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword version: major.minor version of Kubernetes release. + :paramtype version: str + :keyword capabilities: Capabilities on this Kubernetes version. + :paramtype capabilities: + ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersionCapabilities + :keyword is_default: Whether this version is default. + :paramtype is_default: bool + :keyword is_preview: Whether this version is in preview mode. + :paramtype is_preview: bool + :keyword patch_versions: Patch versions of Kubernetes release. + :paramtype patch_versions: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesPatchVersion] + """ + super().__init__(**kwargs) + self.version = version + self.capabilities = capabilities + self.is_default = is_default + self.is_preview = is_preview + self.patch_versions = patch_versions + + +class KubernetesVersionCapabilities(_serialization.Model): + """Capabilities on this Kubernetes version. 
+ + :ivar support_plan: + :vartype support_plan: list[str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesSupportPlan] + """ + + _attribute_map = { + "support_plan": {"key": "supportPlan", "type": "[str]"}, + } + + def __init__( + self, *, support_plan: Optional[List[Union[str, "_models.KubernetesSupportPlan"]]] = None, **kwargs: Any + ) -> None: + """ + :keyword support_plan: + :paramtype support_plan: list[str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesSupportPlan] + """ + super().__init__(**kwargs) + self.support_plan = support_plan + + +class KubernetesVersionListResult(_serialization.Model): + """Hold values properties, which is array of KubernetesVersion. + + :ivar values: Array of AKS supported Kubernetes versions. + :vartype values: list[~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersion] + """ + + _attribute_map = { + "values": {"key": "values", "type": "[KubernetesVersion]"}, + } + + def __init__(self, *, values: Optional[List["_models.KubernetesVersion"]] = None, **kwargs: Any) -> None: + """ + :keyword values: Array of AKS supported Kubernetes versions. + :paramtype values: list[~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersion] + """ + super().__init__(**kwargs) + self.values = values + + +class LinuxOSConfig(_serialization.Model): + """See `AKS custom node configuration + `_ for more details. + + :ivar sysctls: Sysctl settings for Linux agent nodes. + :vartype sysctls: ~azure.mgmt.containerservice.v2024_07_01.models.SysctlConfig + :ivar transparent_huge_page_enabled: Valid values are 'always', 'madvise', and 'never'. The + default is 'always'. For more information see `Transparent Hugepages + `_. + :vartype transparent_huge_page_enabled: str + :ivar transparent_huge_page_defrag: Valid values are 'always', 'defer', 'defer+madvise', + 'madvise' and 'never'. The default is 'madvise'. For more information see `Transparent + Hugepages + `_. 
+ :vartype transparent_huge_page_defrag: str + :ivar swap_file_size_mb: The size in MB of a swap file that will be created on each node. + :vartype swap_file_size_mb: int + """ + + _attribute_map = { + "sysctls": {"key": "sysctls", "type": "SysctlConfig"}, + "transparent_huge_page_enabled": {"key": "transparentHugePageEnabled", "type": "str"}, + "transparent_huge_page_defrag": {"key": "transparentHugePageDefrag", "type": "str"}, + "swap_file_size_mb": {"key": "swapFileSizeMB", "type": "int"}, + } + + def __init__( + self, + *, + sysctls: Optional["_models.SysctlConfig"] = None, + transparent_huge_page_enabled: Optional[str] = None, + transparent_huge_page_defrag: Optional[str] = None, + swap_file_size_mb: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword sysctls: Sysctl settings for Linux agent nodes. + :paramtype sysctls: ~azure.mgmt.containerservice.v2024_07_01.models.SysctlConfig + :keyword transparent_huge_page_enabled: Valid values are 'always', 'madvise', and 'never'. The + default is 'always'. For more information see `Transparent Hugepages + `_. + :paramtype transparent_huge_page_enabled: str + :keyword transparent_huge_page_defrag: Valid values are 'always', 'defer', 'defer+madvise', + 'madvise' and 'never'. The default is 'madvise'. For more information see `Transparent + Hugepages + `_. + :paramtype transparent_huge_page_defrag: str + :keyword swap_file_size_mb: The size in MB of a swap file that will be created on each node. + :paramtype swap_file_size_mb: int + """ + super().__init__(**kwargs) + self.sysctls = sysctls + self.transparent_huge_page_enabled = transparent_huge_page_enabled + self.transparent_huge_page_defrag = transparent_huge_page_defrag + self.swap_file_size_mb = swap_file_size_mb + + +class Machine(SubResource): + """A machine. Contains details about the underlying virtual machine. 
A machine may be visible here + but not in kubectl get nodes; if so it may be because the machine has not been registered with + the Kubernetes API Server yet. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Resource ID. + :vartype id: str + :ivar name: The name of the resource that is unique within a resource group. This name can be + used to access the resource. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar properties: The properties of the machine. + :vartype properties: ~azure.mgmt.containerservice.v2024_07_01.models.MachineProperties + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "properties": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "properties": {"key": "properties", "type": "MachineProperties"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.properties = None + + +class MachineIpAddress(_serialization.Model): + """The machine IP address details. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar family: To determine if address belongs IPv4 or IPv6 family. Known values are: "IPv4" and + "IPv6". + :vartype family: str or ~azure.mgmt.containerservice.v2024_07_01.models.IpFamily + :ivar ip: IPv4 or IPv6 address of the machine. + :vartype ip: str + """ + + _validation = { + "family": {"readonly": True}, + "ip": {"readonly": True}, + } + + _attribute_map = { + "family": {"key": "family", "type": "str"}, + "ip": {"key": "ip", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.family = None + self.ip = None + + +class MachineListResult(_serialization.Model): + """The response from the List Machines operation. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar next_link: The URL to get the next set of machine results. + :vartype next_link: str + :ivar value: The list of Machines in cluster. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.Machine] + """ + + _validation = { + "next_link": {"readonly": True}, + } + + _attribute_map = { + "next_link": {"key": "nextLink", "type": "str"}, + "value": {"key": "value", "type": "[Machine]"}, + } + + def __init__(self, *, value: Optional[List["_models.Machine"]] = None, **kwargs: Any) -> None: + """ + :keyword value: The list of Machines in cluster. + :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.Machine] + """ + super().__init__(**kwargs) + self.next_link = None + self.value = value + + +class MachineNetworkProperties(_serialization.Model): + """network properties of the machine. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar ip_addresses: IPv4, IPv6 addresses of the machine. + :vartype ip_addresses: list[~azure.mgmt.containerservice.v2024_07_01.models.MachineIpAddress] + """ + + _validation = { + "ip_addresses": {"readonly": True}, + } + + _attribute_map = { + "ip_addresses": {"key": "ipAddresses", "type": "[MachineIpAddress]"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.ip_addresses = None + + +class MachineProperties(_serialization.Model): + """The properties of the machine. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar network: network properties of the machine. + :vartype network: ~azure.mgmt.containerservice.v2024_07_01.models.MachineNetworkProperties + :ivar resource_id: Azure resource id of the machine. It can be used to GET underlying VM + Instance. 
+ :vartype resource_id: str + """ + + _validation = { + "network": {"readonly": True}, + "resource_id": {"readonly": True}, + } + + _attribute_map = { + "network": {"key": "network", "type": "MachineNetworkProperties"}, + "resource_id": {"key": "resourceId", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.network = None + self.resource_id = None + + +class MaintenanceConfiguration(SubResource): + """See `planned maintenance `_ for more + information about planned maintenance. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Resource ID. + :vartype id: str + :ivar name: The name of the resource that is unique within a resource group. This name can be + used to access the resource. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :ivar time_in_week: If two array entries specify the same day of the week, the applied + configuration is the union of times in both entries. + :vartype time_in_week: list[~azure.mgmt.containerservice.v2024_07_01.models.TimeInWeek] + :ivar not_allowed_time: Time slots on which upgrade is not allowed. + :vartype not_allowed_time: list[~azure.mgmt.containerservice.v2024_07_01.models.TimeSpan] + :ivar maintenance_window: Maintenance window for the maintenance configuration. 
+ :vartype maintenance_window: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceWindow + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "time_in_week": {"key": "properties.timeInWeek", "type": "[TimeInWeek]"}, + "not_allowed_time": {"key": "properties.notAllowedTime", "type": "[TimeSpan]"}, + "maintenance_window": {"key": "properties.maintenanceWindow", "type": "MaintenanceWindow"}, + } + + def __init__( + self, + *, + time_in_week: Optional[List["_models.TimeInWeek"]] = None, + not_allowed_time: Optional[List["_models.TimeSpan"]] = None, + maintenance_window: Optional["_models.MaintenanceWindow"] = None, + **kwargs: Any + ) -> None: + """ + :keyword time_in_week: If two array entries specify the same day of the week, the applied + configuration is the union of times in both entries. + :paramtype time_in_week: list[~azure.mgmt.containerservice.v2024_07_01.models.TimeInWeek] + :keyword not_allowed_time: Time slots on which upgrade is not allowed. + :paramtype not_allowed_time: list[~azure.mgmt.containerservice.v2024_07_01.models.TimeSpan] + :keyword maintenance_window: Maintenance window for the maintenance configuration. + :paramtype maintenance_window: + ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceWindow + """ + super().__init__(**kwargs) + self.system_data = None + self.time_in_week = time_in_week + self.not_allowed_time = not_allowed_time + self.maintenance_window = maintenance_window + + +class MaintenanceConfigurationListResult(_serialization.Model): + """The response from the List maintenance configurations operation. + + Variables are only populated by the server, and will be ignored when sending a request. 
+ + :ivar value: The list of maintenance configurations. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration] + :ivar next_link: The URL to get the next set of maintenance configuration results. + :vartype next_link: str + """ + + _validation = { + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[MaintenanceConfiguration]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: Optional[List["_models.MaintenanceConfiguration"]] = None, **kwargs: Any) -> None: + """ + :keyword value: The list of maintenance configurations. + :paramtype value: + list[~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration] + """ + super().__init__(**kwargs) + self.value = value + self.next_link = None + + +class MaintenanceWindow(_serialization.Model): + """Maintenance window used to configure scheduled auto-upgrade for a Managed Cluster. + + All required parameters must be populated in order to send to server. + + :ivar schedule: Recurrence schedule for the maintenance window. Required. + :vartype schedule: ~azure.mgmt.containerservice.v2024_07_01.models.Schedule + :ivar duration_hours: Length of maintenance window range from 4 to 24 hours. + :vartype duration_hours: int + :ivar utc_offset: The UTC offset in format +/-HH:mm. For example, '+05:30' for IST and '-07:00' + for PST. If not specified, the default is '+00:00'. + :vartype utc_offset: str + :ivar start_date: The date the maintenance window activates. If the current date is before this + date, the maintenance window is inactive and will not be used for upgrades. If not specified, + the maintenance window will be active right away. + :vartype start_date: ~datetime.date + :ivar start_time: The start time of the maintenance window. Accepted values are from '00:00' to + '23:59'. 'utcOffset' applies to this field. For example: '02:00' with 'utcOffset: +02:00' means + UTC time '00:00'. 
Required. + :vartype start_time: str + :ivar not_allowed_dates: Date ranges on which upgrade is not allowed. 'utcOffset' applies to + this field. For example, with 'utcOffset: +02:00' and 'dateSpan' being '2022-12-23' to + '2023-01-03', maintenance will be blocked from '2022-12-22 22:00' to '2023-01-03 22:00' in UTC + time. + :vartype not_allowed_dates: list[~azure.mgmt.containerservice.v2024_07_01.models.DateSpan] + """ + + _validation = { + "schedule": {"required": True}, + "duration_hours": {"required": True, "maximum": 24, "minimum": 4}, + "utc_offset": {"pattern": r"^(-|\+)[0-9]{2}:[0-9]{2}$"}, + "start_time": {"required": True, "pattern": r"^\d{2}:\d{2}$"}, + } + + _attribute_map = { + "schedule": {"key": "schedule", "type": "Schedule"}, + "duration_hours": {"key": "durationHours", "type": "int"}, + "utc_offset": {"key": "utcOffset", "type": "str"}, + "start_date": {"key": "startDate", "type": "date"}, + "start_time": {"key": "startTime", "type": "str"}, + "not_allowed_dates": {"key": "notAllowedDates", "type": "[DateSpan]"}, + } + + def __init__( + self, + *, + schedule: "_models.Schedule", + duration_hours: int = 24, + start_time: str, + utc_offset: Optional[str] = None, + start_date: Optional[datetime.date] = None, + not_allowed_dates: Optional[List["_models.DateSpan"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword schedule: Recurrence schedule for the maintenance window. Required. + :paramtype schedule: ~azure.mgmt.containerservice.v2024_07_01.models.Schedule + :keyword duration_hours: Length of maintenance window range from 4 to 24 hours. + :paramtype duration_hours: int + :keyword utc_offset: The UTC offset in format +/-HH:mm. For example, '+05:30' for IST and + '-07:00' for PST. If not specified, the default is '+00:00'. + :paramtype utc_offset: str + :keyword start_date: The date the maintenance window activates. If the current date is before + this date, the maintenance window is inactive and will not be used for upgrades. 
If not + specified, the maintenance window will be active right away. + :paramtype start_date: ~datetime.date + :keyword start_time: The start time of the maintenance window. Accepted values are from '00:00' + to '23:59'. 'utcOffset' applies to this field. For example: '02:00' with 'utcOffset: +02:00' + means UTC time '00:00'. Required. + :paramtype start_time: str + :keyword not_allowed_dates: Date ranges on which upgrade is not allowed. 'utcOffset' applies to + this field. For example, with 'utcOffset: +02:00' and 'dateSpan' being '2022-12-23' to + '2023-01-03', maintenance will be blocked from '2022-12-22 22:00' to '2023-01-03 22:00' in UTC + time. + :paramtype not_allowed_dates: list[~azure.mgmt.containerservice.v2024_07_01.models.DateSpan] + """ + super().__init__(**kwargs) + self.schedule = schedule + self.duration_hours = duration_hours + self.utc_offset = utc_offset + self.start_date = start_date + self.start_time = start_time + self.not_allowed_dates = not_allowed_dates + + +class Resource(_serialization.Model): + """Common fields that are returned in the response for all Azure Resource Manager resources. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. E.g. + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. 
+ :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.system_data = None + + +class TrackedResource(Resource): + """The resource model definition for an Azure Resource Manager tracked top level resource which + has 'tags' and a 'location'. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: Fully qualified resource ID for the resource. E.g. + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar location: The geo-location where the resource lives. Required. 
+ :vartype location: str + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + "location": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "tags": {"key": "tags", "type": "{str}"}, + "location": {"key": "location", "type": "str"}, + } + + def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None: + """ + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword location: The geo-location where the resource lives. Required. + :paramtype location: str + """ + super().__init__(**kwargs) + self.tags = tags + self.location = location + + +class ManagedCluster(TrackedResource): # pylint: disable=too-many-instance-attributes + """Managed cluster. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: Fully qualified resource ID for the resource. E.g. + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar location: The geo-location where the resource lives. Required. + :vartype location: str + :ivar sku: The managed cluster SKU. 
+ :vartype sku: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKU + :ivar extended_location: The extended location of the Virtual Machine. + :vartype extended_location: ~azure.mgmt.containerservice.v2024_07_01.models.ExtendedLocation + :ivar identity: The identity of the managed cluster, if configured. + :vartype identity: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIdentity + :ivar provisioning_state: The current provisioning state. + :vartype provisioning_state: str + :ivar power_state: The Power State of the cluster. + :vartype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :ivar max_agent_pools: The max number of agent pools for the managed cluster. + :vartype max_agent_pools: int + :ivar kubernetes_version: Both patch version (e.g. 1.20.13) and + (e.g. 1.20) are supported. When is specified, the latest supported + GA patch version is chosen automatically. Updating the cluster with the same once + it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch + version is available. When you upgrade a supported AKS cluster, Kubernetes minor versions + cannot be skipped. All upgrades must be performed sequentially by major version number. For + example, upgrades between 1.14.x -> 1.15.x or 1.15.x -> 1.16.x are allowed, however 1.14.x -> + 1.16.x is not allowed. See `upgrading an AKS cluster + `_ for more details. + :vartype kubernetes_version: str + :ivar current_kubernetes_version: If kubernetesVersion was a fully specified version + , this field will be exactly equal to it. If kubernetesVersion was + , this field will contain the full version being used. + :vartype current_kubernetes_version: str + :ivar dns_prefix: This cannot be updated once the Managed Cluster has been created. + :vartype dns_prefix: str + :ivar fqdn_subdomain: This cannot be updated once the Managed Cluster has been created. + :vartype fqdn_subdomain: str + :ivar fqdn: The FQDN of the master pool. 
+ :vartype fqdn: str + :ivar private_fqdn: The FQDN of private cluster. + :vartype private_fqdn: str + :ivar azure_portal_fqdn: The Azure Portal requires certain Cross-Origin Resource Sharing (CORS) + headers to be sent in some responses, which Kubernetes APIServer doesn't handle by default. + This special FQDN supports CORS, allowing the Azure Portal to function properly. + :vartype azure_portal_fqdn: str + :ivar agent_pool_profiles: The agent pool properties. + :vartype agent_pool_profiles: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAgentPoolProfile] + :ivar linux_profile: The profile for Linux VMs in the Managed Cluster. + :vartype linux_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceLinuxProfile + :ivar windows_profile: The profile for Windows VMs in the Managed Cluster. + :vartype windows_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWindowsProfile + :ivar service_principal_profile: Information about a service principal identity for the cluster + to use for manipulating Azure APIs. + :vartype service_principal_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile + :ivar addon_profiles: The profile of managed cluster add-on. + :vartype addon_profiles: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAddonProfile] + :ivar pod_identity_profile: See `use AAD pod identity + `_ for more details on AAD pod + identity integration. + :vartype pod_identity_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProfile + :ivar oidc_issuer_profile: The OIDC issuer profile of the Managed Cluster. + :vartype oidc_issuer_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterOIDCIssuerProfile + :ivar node_resource_group: The name of the resource group containing agent pool nodes. 
+ :vartype node_resource_group: str + :ivar enable_rbac: Whether to enable Kubernetes Role-Based Access Control. + :vartype enable_rbac: bool + :ivar support_plan: The support plan for the Managed Cluster. If unspecified, the default is + 'KubernetesOfficial'. Known values are: "KubernetesOfficial" and "AKSLongTermSupport". + :vartype support_plan: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesSupportPlan + :ivar enable_pod_security_policy: (DEPRECATED) Whether to enable Kubernetes pod security policy + (preview). PodSecurityPolicy was deprecated in Kubernetes v1.21, and removed from Kubernetes in + v1.25. Learn more at https://aka.ms/k8s/psp and https://aka.ms/aks/psp. + :vartype enable_pod_security_policy: bool + :ivar network_profile: The network configuration profile. + :vartype network_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceNetworkProfile + :ivar aad_profile: The Azure Active Directory configuration. + :vartype aad_profile: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile + :ivar auto_upgrade_profile: The auto upgrade configuration. + :vartype auto_upgrade_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAutoUpgradeProfile + :ivar upgrade_settings: Settings for upgrading a cluster. + :vartype upgrade_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.ClusterUpgradeSettings + :ivar auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled. + :vartype auto_scaler_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPropertiesAutoScalerProfile + :ivar api_server_access_profile: The access profile for managed cluster API server. 
+ :vartype api_server_access_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAPIServerAccessProfile + :ivar disk_encryption_set_id: This is of the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{encryptionSetName}'. # pylint: disable=line-too-long + :vartype disk_encryption_set_id: str + :ivar identity_profile: Identities associated with the cluster. + :vartype identity_profile: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity] + :ivar private_link_resources: Private link resources associated with the cluster. + :vartype private_link_resources: + list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource] + :ivar disable_local_accounts: If set to true, getting static credentials will be disabled for + this cluster. This must only be used on Managed Clusters that are AAD enabled. For more details + see `disable local accounts + `_. + :vartype disable_local_accounts: bool + :ivar http_proxy_config: Configurations for provisioning the cluster with HTTP proxy servers. + :vartype http_proxy_config: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterHTTPProxyConfig + :ivar security_profile: Security profile for the managed cluster. + :vartype security_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfile + :ivar storage_profile: Storage profile for the managed cluster. + :vartype storage_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfile + :ivar ingress_profile: Ingress profile for the managed cluster. + :vartype ingress_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIngressProfile + :ivar public_network_access: Allow or deny public network access for AKS. Known values are: + "Enabled" and "Disabled". 
+ :vartype public_network_access: str or + ~azure.mgmt.containerservice.v2024_07_01.models.PublicNetworkAccess + :ivar workload_auto_scaler_profile: Workload Auto-scaler profile for the managed cluster. + :vartype workload_auto_scaler_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfile + :ivar azure_monitor_profile: Azure Monitor addon profiles for monitoring the managed cluster. + :vartype azure_monitor_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfile + :ivar service_mesh_profile: Service mesh profile for a managed cluster. + :vartype service_mesh_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ServiceMeshProfile + :ivar resource_uid: The resourceUID uniquely identifies ManagedClusters that reuse ARM + ResourceIds (i.e: create, delete, create sequence). + :vartype resource_uid: str + :ivar metrics_profile: Optional cluster metrics configuration. + :vartype metrics_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterMetricsProfile + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + "location": {"required": True}, + "provisioning_state": {"readonly": True}, + "power_state": {"readonly": True}, + "max_agent_pools": {"readonly": True}, + "current_kubernetes_version": {"readonly": True}, + "fqdn": {"readonly": True}, + "private_fqdn": {"readonly": True}, + "azure_portal_fqdn": {"readonly": True}, + "resource_uid": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "tags": {"key": "tags", "type": "{str}"}, + "location": {"key": "location", "type": "str"}, + "sku": {"key": "sku", "type": "ManagedClusterSKU"}, + "extended_location": {"key": "extendedLocation", "type": 
"ExtendedLocation"}, + "identity": {"key": "identity", "type": "ManagedClusterIdentity"}, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "power_state": {"key": "properties.powerState", "type": "PowerState"}, + "max_agent_pools": {"key": "properties.maxAgentPools", "type": "int"}, + "kubernetes_version": {"key": "properties.kubernetesVersion", "type": "str"}, + "current_kubernetes_version": {"key": "properties.currentKubernetesVersion", "type": "str"}, + "dns_prefix": {"key": "properties.dnsPrefix", "type": "str"}, + "fqdn_subdomain": {"key": "properties.fqdnSubdomain", "type": "str"}, + "fqdn": {"key": "properties.fqdn", "type": "str"}, + "private_fqdn": {"key": "properties.privateFQDN", "type": "str"}, + "azure_portal_fqdn": {"key": "properties.azurePortalFQDN", "type": "str"}, + "agent_pool_profiles": {"key": "properties.agentPoolProfiles", "type": "[ManagedClusterAgentPoolProfile]"}, + "linux_profile": {"key": "properties.linuxProfile", "type": "ContainerServiceLinuxProfile"}, + "windows_profile": {"key": "properties.windowsProfile", "type": "ManagedClusterWindowsProfile"}, + "service_principal_profile": { + "key": "properties.servicePrincipalProfile", + "type": "ManagedClusterServicePrincipalProfile", + }, + "addon_profiles": {"key": "properties.addonProfiles", "type": "{ManagedClusterAddonProfile}"}, + "pod_identity_profile": {"key": "properties.podIdentityProfile", "type": "ManagedClusterPodIdentityProfile"}, + "oidc_issuer_profile": {"key": "properties.oidcIssuerProfile", "type": "ManagedClusterOIDCIssuerProfile"}, + "node_resource_group": {"key": "properties.nodeResourceGroup", "type": "str"}, + "enable_rbac": {"key": "properties.enableRBAC", "type": "bool"}, + "support_plan": {"key": "properties.supportPlan", "type": "str"}, + "enable_pod_security_policy": {"key": "properties.enablePodSecurityPolicy", "type": "bool"}, + "network_profile": {"key": "properties.networkProfile", "type": "ContainerServiceNetworkProfile"}, + 
"aad_profile": {"key": "properties.aadProfile", "type": "ManagedClusterAADProfile"}, + "auto_upgrade_profile": {"key": "properties.autoUpgradeProfile", "type": "ManagedClusterAutoUpgradeProfile"}, + "upgrade_settings": {"key": "properties.upgradeSettings", "type": "ClusterUpgradeSettings"}, + "auto_scaler_profile": { + "key": "properties.autoScalerProfile", + "type": "ManagedClusterPropertiesAutoScalerProfile", + }, + "api_server_access_profile": { + "key": "properties.apiServerAccessProfile", + "type": "ManagedClusterAPIServerAccessProfile", + }, + "disk_encryption_set_id": {"key": "properties.diskEncryptionSetID", "type": "str"}, + "identity_profile": {"key": "properties.identityProfile", "type": "{UserAssignedIdentity}"}, + "private_link_resources": {"key": "properties.privateLinkResources", "type": "[PrivateLinkResource]"}, + "disable_local_accounts": {"key": "properties.disableLocalAccounts", "type": "bool"}, + "http_proxy_config": {"key": "properties.httpProxyConfig", "type": "ManagedClusterHTTPProxyConfig"}, + "security_profile": {"key": "properties.securityProfile", "type": "ManagedClusterSecurityProfile"}, + "storage_profile": {"key": "properties.storageProfile", "type": "ManagedClusterStorageProfile"}, + "ingress_profile": {"key": "properties.ingressProfile", "type": "ManagedClusterIngressProfile"}, + "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"}, + "workload_auto_scaler_profile": { + "key": "properties.workloadAutoScalerProfile", + "type": "ManagedClusterWorkloadAutoScalerProfile", + }, + "azure_monitor_profile": {"key": "properties.azureMonitorProfile", "type": "ManagedClusterAzureMonitorProfile"}, + "service_mesh_profile": {"key": "properties.serviceMeshProfile", "type": "ServiceMeshProfile"}, + "resource_uid": {"key": "properties.resourceUID", "type": "str"}, + "metrics_profile": {"key": "properties.metricsProfile", "type": "ManagedClusterMetricsProfile"}, + } + + def __init__( # pylint: disable=too-many-locals + 
self, + *, + location: str, + tags: Optional[Dict[str, str]] = None, + sku: Optional["_models.ManagedClusterSKU"] = None, + extended_location: Optional["_models.ExtendedLocation"] = None, + identity: Optional["_models.ManagedClusterIdentity"] = None, + kubernetes_version: Optional[str] = None, + dns_prefix: Optional[str] = None, + fqdn_subdomain: Optional[str] = None, + agent_pool_profiles: Optional[List["_models.ManagedClusterAgentPoolProfile"]] = None, + linux_profile: Optional["_models.ContainerServiceLinuxProfile"] = None, + windows_profile: Optional["_models.ManagedClusterWindowsProfile"] = None, + service_principal_profile: Optional["_models.ManagedClusterServicePrincipalProfile"] = None, + addon_profiles: Optional[Dict[str, "_models.ManagedClusterAddonProfile"]] = None, + pod_identity_profile: Optional["_models.ManagedClusterPodIdentityProfile"] = None, + oidc_issuer_profile: Optional["_models.ManagedClusterOIDCIssuerProfile"] = None, + node_resource_group: Optional[str] = None, + enable_rbac: Optional[bool] = None, + support_plan: Optional[Union[str, "_models.KubernetesSupportPlan"]] = None, + enable_pod_security_policy: Optional[bool] = None, + network_profile: Optional["_models.ContainerServiceNetworkProfile"] = None, + aad_profile: Optional["_models.ManagedClusterAADProfile"] = None, + auto_upgrade_profile: Optional["_models.ManagedClusterAutoUpgradeProfile"] = None, + upgrade_settings: Optional["_models.ClusterUpgradeSettings"] = None, + auto_scaler_profile: Optional["_models.ManagedClusterPropertiesAutoScalerProfile"] = None, + api_server_access_profile: Optional["_models.ManagedClusterAPIServerAccessProfile"] = None, + disk_encryption_set_id: Optional[str] = None, + identity_profile: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None, + private_link_resources: Optional[List["_models.PrivateLinkResource"]] = None, + disable_local_accounts: Optional[bool] = None, + http_proxy_config: Optional["_models.ManagedClusterHTTPProxyConfig"] = None, + 
security_profile: Optional["_models.ManagedClusterSecurityProfile"] = None, + storage_profile: Optional["_models.ManagedClusterStorageProfile"] = None, + ingress_profile: Optional["_models.ManagedClusterIngressProfile"] = None, + public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None, + workload_auto_scaler_profile: Optional["_models.ManagedClusterWorkloadAutoScalerProfile"] = None, + azure_monitor_profile: Optional["_models.ManagedClusterAzureMonitorProfile"] = None, + service_mesh_profile: Optional["_models.ServiceMeshProfile"] = None, + metrics_profile: Optional["_models.ManagedClusterMetricsProfile"] = None, + **kwargs: Any + ) -> None: + """ + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword location: The geo-location where the resource lives. Required. + :paramtype location: str + :keyword sku: The managed cluster SKU. + :paramtype sku: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKU + :keyword extended_location: The extended location of the Virtual Machine. + :paramtype extended_location: ~azure.mgmt.containerservice.v2024_07_01.models.ExtendedLocation + :keyword identity: The identity of the managed cluster, if configured. + :paramtype identity: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIdentity + :keyword kubernetes_version: Both patch version (e.g. 1.20.13) and + (e.g. 1.20) are supported. When is specified, the latest supported + GA patch version is chosen automatically. Updating the cluster with the same once + it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch + version is available. When you upgrade a supported AKS cluster, Kubernetes minor versions + cannot be skipped. All upgrades must be performed sequentially by major version number. For + example, upgrades between 1.14.x -> 1.15.x or 1.15.x -> 1.16.x are allowed, however 1.14.x -> + 1.16.x is not allowed. See `upgrading an AKS cluster + `_ for more details. 
+ :paramtype kubernetes_version: str + :keyword dns_prefix: This cannot be updated once the Managed Cluster has been created. + :paramtype dns_prefix: str + :keyword fqdn_subdomain: This cannot be updated once the Managed Cluster has been created. + :paramtype fqdn_subdomain: str + :keyword agent_pool_profiles: The agent pool properties. + :paramtype agent_pool_profiles: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAgentPoolProfile] + :keyword linux_profile: The profile for Linux VMs in the Managed Cluster. + :paramtype linux_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceLinuxProfile + :keyword windows_profile: The profile for Windows VMs in the Managed Cluster. + :paramtype windows_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWindowsProfile + :keyword service_principal_profile: Information about a service principal identity for the + cluster to use for manipulating Azure APIs. + :paramtype service_principal_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile + :keyword addon_profiles: The profile of managed cluster add-on. + :paramtype addon_profiles: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAddonProfile] + :keyword pod_identity_profile: See `use AAD pod identity + `_ for more details on AAD pod + identity integration. + :paramtype pod_identity_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProfile + :keyword oidc_issuer_profile: The OIDC issuer profile of the Managed Cluster. + :paramtype oidc_issuer_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterOIDCIssuerProfile + :keyword node_resource_group: The name of the resource group containing agent pool nodes. + :paramtype node_resource_group: str + :keyword enable_rbac: Whether to enable Kubernetes Role-Based Access Control. 
+ :paramtype enable_rbac: bool + :keyword support_plan: The support plan for the Managed Cluster. If unspecified, the default is + 'KubernetesOfficial'. Known values are: "KubernetesOfficial" and "AKSLongTermSupport". + :paramtype support_plan: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesSupportPlan + :keyword enable_pod_security_policy: (DEPRECATED) Whether to enable Kubernetes pod security + policy (preview). PodSecurityPolicy was deprecated in Kubernetes v1.21, and removed from + Kubernetes in v1.25. Learn more at https://aka.ms/k8s/psp and https://aka.ms/aks/psp. + :paramtype enable_pod_security_policy: bool + :keyword network_profile: The network configuration profile. + :paramtype network_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceNetworkProfile + :keyword aad_profile: The Azure Active Directory configuration. + :paramtype aad_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile + :keyword auto_upgrade_profile: The auto upgrade configuration. + :paramtype auto_upgrade_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAutoUpgradeProfile + :keyword upgrade_settings: Settings for upgrading a cluster. + :paramtype upgrade_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.ClusterUpgradeSettings + :keyword auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled. + :paramtype auto_scaler_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPropertiesAutoScalerProfile + :keyword api_server_access_profile: The access profile for managed cluster API server. + :paramtype api_server_access_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAPIServerAccessProfile + :keyword disk_encryption_set_id: This is of the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{encryptionSetName}'. 
# pylint: disable=line-too-long + :paramtype disk_encryption_set_id: str + :keyword identity_profile: Identities associated with the cluster. + :paramtype identity_profile: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity] + :keyword private_link_resources: Private link resources associated with the cluster. + :paramtype private_link_resources: + list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource] + :keyword disable_local_accounts: If set to true, getting static credentials will be disabled + for this cluster. This must only be used on Managed Clusters that are AAD enabled. For more + details see `disable local accounts + `_. + :paramtype disable_local_accounts: bool + :keyword http_proxy_config: Configurations for provisioning the cluster with HTTP proxy + servers. + :paramtype http_proxy_config: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterHTTPProxyConfig + :keyword security_profile: Security profile for the managed cluster. + :paramtype security_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfile + :keyword storage_profile: Storage profile for the managed cluster. + :paramtype storage_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfile + :keyword ingress_profile: Ingress profile for the managed cluster. + :paramtype ingress_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIngressProfile + :keyword public_network_access: Allow or deny public network access for AKS. Known values are: + "Enabled" and "Disabled". + :paramtype public_network_access: str or + ~azure.mgmt.containerservice.v2024_07_01.models.PublicNetworkAccess + :keyword workload_auto_scaler_profile: Workload Auto-scaler profile for the managed cluster. 
+ :paramtype workload_auto_scaler_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfile + :keyword azure_monitor_profile: Azure Monitor addon profiles for monitoring the managed + cluster. + :paramtype azure_monitor_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfile + :keyword service_mesh_profile: Service mesh profile for a managed cluster. + :paramtype service_mesh_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ServiceMeshProfile + :keyword metrics_profile: Optional cluster metrics configuration. + :paramtype metrics_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterMetricsProfile + """ + super().__init__(tags=tags, location=location, **kwargs) + self.sku = sku + self.extended_location = extended_location + self.identity = identity + self.provisioning_state = None + self.power_state = None + self.max_agent_pools = None + self.kubernetes_version = kubernetes_version + self.current_kubernetes_version = None + self.dns_prefix = dns_prefix + self.fqdn_subdomain = fqdn_subdomain + self.fqdn = None + self.private_fqdn = None + self.azure_portal_fqdn = None + self.agent_pool_profiles = agent_pool_profiles + self.linux_profile = linux_profile + self.windows_profile = windows_profile + self.service_principal_profile = service_principal_profile + self.addon_profiles = addon_profiles + self.pod_identity_profile = pod_identity_profile + self.oidc_issuer_profile = oidc_issuer_profile + self.node_resource_group = node_resource_group + self.enable_rbac = enable_rbac + self.support_plan = support_plan + self.enable_pod_security_policy = enable_pod_security_policy + self.network_profile = network_profile + self.aad_profile = aad_profile + self.auto_upgrade_profile = auto_upgrade_profile + self.upgrade_settings = upgrade_settings + self.auto_scaler_profile = auto_scaler_profile + self.api_server_access_profile = api_server_access_profile + 
self.disk_encryption_set_id = disk_encryption_set_id + self.identity_profile = identity_profile + self.private_link_resources = private_link_resources + self.disable_local_accounts = disable_local_accounts + self.http_proxy_config = http_proxy_config + self.security_profile = security_profile + self.storage_profile = storage_profile + self.ingress_profile = ingress_profile + self.public_network_access = public_network_access + self.workload_auto_scaler_profile = workload_auto_scaler_profile + self.azure_monitor_profile = azure_monitor_profile + self.service_mesh_profile = service_mesh_profile + self.resource_uid = None + self.metrics_profile = metrics_profile + + +class ManagedClusterAADProfile(_serialization.Model): + """For more details see `managed AAD on AKS `_. + + :ivar managed: Whether to enable managed AAD. + :vartype managed: bool + :ivar enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization. + :vartype enable_azure_rbac: bool + :ivar admin_group_object_i_ds: The list of AAD group object IDs that will have admin role of + the cluster. + :vartype admin_group_object_i_ds: list[str] + :ivar client_app_id: (DEPRECATED) The client AAD application ID. Learn more at + https://aka.ms/aks/aad-legacy. + :vartype client_app_id: str + :ivar server_app_id: (DEPRECATED) The server AAD application ID. Learn more at + https://aka.ms/aks/aad-legacy. + :vartype server_app_id: str + :ivar server_app_secret: (DEPRECATED) The server AAD application secret. Learn more at + https://aka.ms/aks/aad-legacy. + :vartype server_app_secret: str + :ivar tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the + tenant of the deployment subscription. 
+ :vartype tenant_id: str + """ + + _attribute_map = { + "managed": {"key": "managed", "type": "bool"}, + "enable_azure_rbac": {"key": "enableAzureRBAC", "type": "bool"}, + "admin_group_object_i_ds": {"key": "adminGroupObjectIDs", "type": "[str]"}, + "client_app_id": {"key": "clientAppID", "type": "str"}, + "server_app_id": {"key": "serverAppID", "type": "str"}, + "server_app_secret": {"key": "serverAppSecret", "type": "str"}, + "tenant_id": {"key": "tenantID", "type": "str"}, + } + + def __init__( + self, + *, + managed: Optional[bool] = None, + enable_azure_rbac: Optional[bool] = None, + admin_group_object_i_ds: Optional[List[str]] = None, + client_app_id: Optional[str] = None, + server_app_id: Optional[str] = None, + server_app_secret: Optional[str] = None, + tenant_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword managed: Whether to enable managed AAD. + :paramtype managed: bool + :keyword enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization. + :paramtype enable_azure_rbac: bool + :keyword admin_group_object_i_ds: The list of AAD group object IDs that will have admin role of + the cluster. + :paramtype admin_group_object_i_ds: list[str] + :keyword client_app_id: (DEPRECATED) The client AAD application ID. Learn more at + https://aka.ms/aks/aad-legacy. + :paramtype client_app_id: str + :keyword server_app_id: (DEPRECATED) The server AAD application ID. Learn more at + https://aka.ms/aks/aad-legacy. + :paramtype server_app_id: str + :keyword server_app_secret: (DEPRECATED) The server AAD application secret. Learn more at + https://aka.ms/aks/aad-legacy. + :paramtype server_app_secret: str + :keyword tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the + tenant of the deployment subscription. 
+ :paramtype tenant_id: str + """ + super().__init__(**kwargs) + self.managed = managed + self.enable_azure_rbac = enable_azure_rbac + self.admin_group_object_i_ds = admin_group_object_i_ds + self.client_app_id = client_app_id + self.server_app_id = server_app_id + self.server_app_secret = server_app_secret + self.tenant_id = tenant_id + + +class ManagedClusterAccessProfile(TrackedResource): + """Managed cluster Access Profile. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: Fully qualified resource ID for the resource. E.g. + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar location: The geo-location where the resource lives. Required. + :vartype location: str + :ivar kube_config: Base64-encoded Kubernetes configuration file. 
+ :vartype kube_config: bytes + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + "location": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "tags": {"key": "tags", "type": "{str}"}, + "location": {"key": "location", "type": "str"}, + "kube_config": {"key": "properties.kubeConfig", "type": "bytearray"}, + } + + def __init__( + self, + *, + location: str, + tags: Optional[Dict[str, str]] = None, + kube_config: Optional[bytes] = None, + **kwargs: Any + ) -> None: + """ + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword location: The geo-location where the resource lives. Required. + :paramtype location: str + :keyword kube_config: Base64-encoded Kubernetes configuration file. + :paramtype kube_config: bytes + """ + super().__init__(tags=tags, location=location, **kwargs) + self.kube_config = kube_config + + +class ManagedClusterAddonProfile(_serialization.Model): + """A Kubernetes add-on profile for a managed cluster. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar enabled: Whether the add-on is enabled or not. Required. + :vartype enabled: bool + :ivar config: Key-value pairs for configuring an add-on. + :vartype config: dict[str, str] + :ivar identity: Information of user assigned identity used by this add-on. 
+ :vartype identity: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAddonProfileIdentity + """ + + _validation = { + "enabled": {"required": True}, + "identity": {"readonly": True}, + } + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + "config": {"key": "config", "type": "{str}"}, + "identity": {"key": "identity", "type": "ManagedClusterAddonProfileIdentity"}, + } + + def __init__(self, *, enabled: bool, config: Optional[Dict[str, str]] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether the add-on is enabled or not. Required. + :paramtype enabled: bool + :keyword config: Key-value pairs for configuring an add-on. + :paramtype config: dict[str, str] + """ + super().__init__(**kwargs) + self.enabled = enabled + self.config = config + self.identity = None + + +class UserAssignedIdentity(_serialization.Model): + """Details about a user assigned identity. + + :ivar resource_id: The resource ID of the user assigned identity. + :vartype resource_id: str + :ivar client_id: The client ID of the user assigned identity. + :vartype client_id: str + :ivar object_id: The object ID of the user assigned identity. + :vartype object_id: str + """ + + _attribute_map = { + "resource_id": {"key": "resourceId", "type": "str"}, + "client_id": {"key": "clientId", "type": "str"}, + "object_id": {"key": "objectId", "type": "str"}, + } + + def __init__( + self, + *, + resource_id: Optional[str] = None, + client_id: Optional[str] = None, + object_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword resource_id: The resource ID of the user assigned identity. + :paramtype resource_id: str + :keyword client_id: The client ID of the user assigned identity. + :paramtype client_id: str + :keyword object_id: The object ID of the user assigned identity. 
+ :paramtype object_id: str + """ + super().__init__(**kwargs) + self.resource_id = resource_id + self.client_id = client_id + self.object_id = object_id + + +class ManagedClusterAddonProfileIdentity(UserAssignedIdentity): + """Information of user assigned identity used by this add-on. + + :ivar resource_id: The resource ID of the user assigned identity. + :vartype resource_id: str + :ivar client_id: The client ID of the user assigned identity. + :vartype client_id: str + :ivar object_id: The object ID of the user assigned identity. + :vartype object_id: str + """ + + +class ManagedClusterAgentPoolProfileProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes + """Properties for the container service agent pool profile. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the + range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for + system pools. The default value is 1. + :vartype count: int + :ivar vm_size: VM size availability varies by region. If a node contains insufficient compute + resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted + VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :vartype vm_size: str + :ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine + in the master/agent pool. If you specify 0, it will apply the default osDisk size according to + the vmSize specified. + :vartype os_disk_size_gb: int + :ivar os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk + larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed + after creation. For more information see `Ephemeral OS + `_. Known values are: + "Managed" and "Ephemeral". 
+ :vartype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :ivar kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data + root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". + :vartype kubelet_disk_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :ivar workload_runtime: Determines the type of workload a node can run. Known values are: + "OCIContainer" and "WasmWasi". + :vartype workload_runtime: str or + ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime + :ivar vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and used. + If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just + nodes. This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :vartype vnet_subnet_id: str + :ivar pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see + vnetSubnetID for more details). This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :vartype pod_subnet_id: str + :ivar max_pods: The maximum number of pods that can run on a node. + :vartype max_pods: int + :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and + "Windows". + :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is + Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", + "Windows2019", and "Windows2022". 
+ :vartype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + :ivar max_count: The maximum number of nodes for auto-scaling. + :vartype max_count: int + :ivar min_count: The minimum number of nodes for auto-scaling. + :vartype min_count: int + :ivar enable_auto_scaling: Whether to enable auto-scaler. + :vartype enable_auto_scaling: bool + :ivar scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, it + defaults to Delete. Known values are: "Delete" and "Deallocate". + :vartype scale_down_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode + :ivar type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets" and + "AvailabilitySet". + :vartype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType + :ivar mode: A cluster must have at least one 'System' Agent Pool at all times. For additional + information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". + :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode + :ivar orchestrator_version: Both patch version (e.g. 1.20.13) and + (e.g. 1.20) are supported. When is specified, the latest supported + GA patch version is chosen automatically. Updating the cluster with the same once + it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch + version is available. As a best practice, you should upgrade all node pools in an AKS cluster + to the same Kubernetes version. The node pool version must have the same major version as the + control plane. The node pool minor version must be within two minor versions of the control + plane version. The node pool version cannot be greater than the control plane version. For more + information see `upgrading a node pool + `_. 
+ :vartype orchestrator_version: str + :ivar current_orchestrator_version: If orchestratorVersion is a fully specified version + , this field will be exactly equal to it. If orchestratorVersion is + , this field will contain the full version being used. + :vartype current_orchestrator_version: str + :ivar node_image_version: The version of node image. + :vartype node_image_version: str + :ivar upgrade_settings: Settings for upgrading the agentpool. + :vartype upgrade_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings + :ivar provisioning_state: The current deployment or provisioning state. + :vartype provisioning_state: str + :ivar power_state: When an Agent Pool is first created it is initially Running. The Agent Pool + can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and + does not accrue billing charges. An Agent Pool can only be stopped if it is Running and + provisioning state is Succeeded. + :vartype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :ivar availability_zones: The list of Availability zones to use for nodes. This can only be + specified if the AgentPoolType property is 'VirtualMachineScaleSets'. + :vartype availability_zones: list[str] + :ivar enable_node_public_ip: Some scenarios may require nodes in a node pool to receive their + own dedicated public IP addresses. A common scenario is for gaming workloads, where a console + needs to make a direct connection to a cloud virtual machine to minimize hops. For more + information see `assigning a public IP per node + `_. + The default is false. + :vartype enable_node_public_ip: bool + :ivar node_public_ip_prefix_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. 
# pylint: disable=line-too-long + :vartype node_public_ip_prefix_id: str + :ivar scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default + is 'Regular'. Known values are: "Spot" and "Regular". + :vartype scale_set_priority: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority + :ivar scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is + 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". + :vartype scale_set_eviction_policy: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy + :ivar spot_max_price: Possible values are any decimal value greater than zero or -1 which + indicates the willingness to pay any on-demand price. For more details on spot pricing, see + `spot VMs pricing `_. + :vartype spot_max_price: float + :ivar tags: The tags to be persisted on the agent pool virtual machine scale set. + :vartype tags: dict[str, str] + :ivar node_labels: The node labels to be persisted across all nodes in agent pool. + :vartype node_labels: dict[str, str] + :ivar node_taints: The taints added to new nodes during node pool create and scale. For + example, key=value:NoSchedule. + :vartype node_taints: list[str] + :ivar proximity_placement_group_id: The ID for Proximity Placement Group. + :vartype proximity_placement_group_id: str + :ivar kubelet_config: The Kubelet configuration on the agent pool nodes. + :vartype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :ivar linux_os_config: The OS configuration of Linux agent nodes. + :vartype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig + :ivar enable_encryption_at_host: This is only supported on certain VM sizes and in certain + Azure regions. For more information, see: + https://docs.microsoft.com/azure/aks/enable-host-encryption. 
+ :vartype enable_encryption_at_host: bool + :ivar enable_ultra_ssd: Whether to enable UltraSSD. + :vartype enable_ultra_ssd: bool + :ivar enable_fips: See `Add a FIPS-enabled node pool + `_ + for more details. + :vartype enable_fips: bool + :ivar gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile + for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g". + :vartype gpu_instance_profile: str or + ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :ivar creation_data: CreationData to be used to specify the source Snapshot ID if the node pool + will be created/upgraded using a snapshot. + :vartype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :ivar capacity_reservation_group_id: AKS will associate the specified agent pool with the + Capacity Reservation Group. + :vartype capacity_reservation_group_id: str + :ivar host_group_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + For more information see `Azure dedicated hosts + `_. + :vartype host_group_id: str + :ivar network_profile: Network-related settings of an agent pool. + :vartype network_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :ivar windows_profile: The Windows agent pool's specific profile. + :vartype windows_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :ivar security_profile: The security settings of an agent pool. 
+ :vartype security_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile + """ + + _validation = { + "os_disk_size_gb": {"maximum": 2048, "minimum": 0}, + "current_orchestrator_version": {"readonly": True}, + "node_image_version": {"readonly": True}, + "provisioning_state": {"readonly": True}, + } + + _attribute_map = { + "count": {"key": "count", "type": "int"}, + "vm_size": {"key": "vmSize", "type": "str"}, + "os_disk_size_gb": {"key": "osDiskSizeGB", "type": "int"}, + "os_disk_type": {"key": "osDiskType", "type": "str"}, + "kubelet_disk_type": {"key": "kubeletDiskType", "type": "str"}, + "workload_runtime": {"key": "workloadRuntime", "type": "str"}, + "vnet_subnet_id": {"key": "vnetSubnetID", "type": "str"}, + "pod_subnet_id": {"key": "podSubnetID", "type": "str"}, + "max_pods": {"key": "maxPods", "type": "int"}, + "os_type": {"key": "osType", "type": "str"}, + "os_sku": {"key": "osSKU", "type": "str"}, + "max_count": {"key": "maxCount", "type": "int"}, + "min_count": {"key": "minCount", "type": "int"}, + "enable_auto_scaling": {"key": "enableAutoScaling", "type": "bool"}, + "scale_down_mode": {"key": "scaleDownMode", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "mode": {"key": "mode", "type": "str"}, + "orchestrator_version": {"key": "orchestratorVersion", "type": "str"}, + "current_orchestrator_version": {"key": "currentOrchestratorVersion", "type": "str"}, + "node_image_version": {"key": "nodeImageVersion", "type": "str"}, + "upgrade_settings": {"key": "upgradeSettings", "type": "AgentPoolUpgradeSettings"}, + "provisioning_state": {"key": "provisioningState", "type": "str"}, + "power_state": {"key": "powerState", "type": "PowerState"}, + "availability_zones": {"key": "availabilityZones", "type": "[str]"}, + "enable_node_public_ip": {"key": "enableNodePublicIP", "type": "bool"}, + "node_public_ip_prefix_id": {"key": "nodePublicIPPrefixID", "type": "str"}, + "scale_set_priority": {"key": "scaleSetPriority", "type": 
"str"}, + "scale_set_eviction_policy": {"key": "scaleSetEvictionPolicy", "type": "str"}, + "spot_max_price": {"key": "spotMaxPrice", "type": "float"}, + "tags": {"key": "tags", "type": "{str}"}, + "node_labels": {"key": "nodeLabels", "type": "{str}"}, + "node_taints": {"key": "nodeTaints", "type": "[str]"}, + "proximity_placement_group_id": {"key": "proximityPlacementGroupID", "type": "str"}, + "kubelet_config": {"key": "kubeletConfig", "type": "KubeletConfig"}, + "linux_os_config": {"key": "linuxOSConfig", "type": "LinuxOSConfig"}, + "enable_encryption_at_host": {"key": "enableEncryptionAtHost", "type": "bool"}, + "enable_ultra_ssd": {"key": "enableUltraSSD", "type": "bool"}, + "enable_fips": {"key": "enableFIPS", "type": "bool"}, + "gpu_instance_profile": {"key": "gpuInstanceProfile", "type": "str"}, + "creation_data": {"key": "creationData", "type": "CreationData"}, + "capacity_reservation_group_id": {"key": "capacityReservationGroupID", "type": "str"}, + "host_group_id": {"key": "hostGroupID", "type": "str"}, + "network_profile": {"key": "networkProfile", "type": "AgentPoolNetworkProfile"}, + "windows_profile": {"key": "windowsProfile", "type": "AgentPoolWindowsProfile"}, + "security_profile": {"key": "securityProfile", "type": "AgentPoolSecurityProfile"}, + } + + def __init__( # pylint: disable=too-many-locals + self, + *, + count: Optional[int] = None, + vm_size: Optional[str] = None, + os_disk_size_gb: Optional[int] = None, + os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None, + kubelet_disk_type: Optional[Union[str, "_models.KubeletDiskType"]] = None, + workload_runtime: Optional[Union[str, "_models.WorkloadRuntime"]] = None, + vnet_subnet_id: Optional[str] = None, + pod_subnet_id: Optional[str] = None, + max_pods: Optional[int] = None, + os_type: Union[str, "_models.OSType"] = "Linux", + os_sku: Optional[Union[str, "_models.OSSKU"]] = None, + max_count: Optional[int] = None, + min_count: Optional[int] = None, + enable_auto_scaling: 
Optional[bool] = None, + scale_down_mode: Optional[Union[str, "_models.ScaleDownMode"]] = None, + type: Optional[Union[str, "_models.AgentPoolType"]] = None, + mode: Optional[Union[str, "_models.AgentPoolMode"]] = None, + orchestrator_version: Optional[str] = None, + upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None, + power_state: Optional["_models.PowerState"] = None, + availability_zones: Optional[List[str]] = None, + enable_node_public_ip: Optional[bool] = None, + node_public_ip_prefix_id: Optional[str] = None, + scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular", + scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete", + spot_max_price: float = -1, + tags: Optional[Dict[str, str]] = None, + node_labels: Optional[Dict[str, str]] = None, + node_taints: Optional[List[str]] = None, + proximity_placement_group_id: Optional[str] = None, + kubelet_config: Optional["_models.KubeletConfig"] = None, + linux_os_config: Optional["_models.LinuxOSConfig"] = None, + enable_encryption_at_host: Optional[bool] = None, + enable_ultra_ssd: Optional[bool] = None, + enable_fips: Optional[bool] = None, + gpu_instance_profile: Optional[Union[str, "_models.GPUInstanceProfile"]] = None, + creation_data: Optional["_models.CreationData"] = None, + capacity_reservation_group_id: Optional[str] = None, + host_group_id: Optional[str] = None, + network_profile: Optional["_models.AgentPoolNetworkProfile"] = None, + windows_profile: Optional["_models.AgentPoolWindowsProfile"] = None, + security_profile: Optional["_models.AgentPoolSecurityProfile"] = None, + **kwargs: Any + ) -> None: + """ + :keyword count: Number of agents (VMs) to host docker containers. Allowed values must be in the + range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for + system pools. The default value is 1. + :paramtype count: int + :keyword vm_size: VM size availability varies by region. 
If a node contains insufficient + compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on + restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :paramtype vm_size: str + :keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every + machine in the master/agent pool. If you specify 0, it will apply the default osDisk size + according to the vmSize specified. + :paramtype os_disk_size_gb: int + :keyword os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk + larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed + after creation. For more information see `Ephemeral OS + `_. Known values are: + "Managed" and "Ephemeral". + :paramtype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :keyword kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime + data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". + :paramtype kubelet_disk_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :keyword workload_runtime: Determines the type of workload a node can run. Known values are: + "OCIContainer" and "WasmWasi". + :paramtype workload_runtime: str or + ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime + :keyword vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and + used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to + just nodes. This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :paramtype vnet_subnet_id: str + :keyword pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see + vnetSubnetID for more details). 
This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :paramtype pod_subnet_id: str + :keyword max_pods: The maximum number of pods that can run on a node. + :paramtype max_pods: int + :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" + and "Windows". + :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :keyword os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType + is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", + "Windows2019", and "Windows2022". + :paramtype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + :keyword max_count: The maximum number of nodes for auto-scaling. + :paramtype max_count: int + :keyword min_count: The minimum number of nodes for auto-scaling. + :paramtype min_count: int + :keyword enable_auto_scaling: Whether to enable auto-scaler. + :paramtype enable_auto_scaling: bool + :keyword scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, + it defaults to Delete. Known values are: "Delete" and "Deallocate". + :paramtype scale_down_mode: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode + :keyword type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets" and + "AvailabilitySet". + :paramtype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType + :keyword mode: A cluster must have at least one 'System' Agent Pool at all times. For + additional information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". 
+ :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode + :keyword orchestrator_version: Both patch version (e.g. 1.20.13) and + (e.g. 1.20) are supported. When is specified, the latest supported + GA patch version is chosen automatically. Updating the cluster with the same once + it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch + version is available. As a best practice, you should upgrade all node pools in an AKS cluster + to the same Kubernetes version. The node pool version must have the same major version as the + control plane. The node pool minor version must be within two minor versions of the control + plane version. The node pool version cannot be greater than the control plane version. For more + information see `upgrading a node pool + `_. + :paramtype orchestrator_version: str + :keyword upgrade_settings: Settings for upgrading the agentpool. + :paramtype upgrade_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings + :keyword power_state: When an Agent Pool is first created it is initially Running. The Agent + Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs + and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and + provisioning state is Succeeded. + :paramtype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :keyword availability_zones: The list of Availability zones to use for nodes. This can only be + specified if the AgentPoolType property is 'VirtualMachineScaleSets'. + :paramtype availability_zones: list[str] + :keyword enable_node_public_ip: Some scenarios may require nodes in a node pool to receive + their own dedicated public IP addresses. A common scenario is for gaming workloads, where a + console needs to make a direct connection to a cloud virtual machine to minimize hops. 
For more + information see `assigning a public IP per node + `_. # pylint: disable=line-too-long + The default is false. + :paramtype enable_node_public_ip: bool + :keyword node_public_ip_prefix_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. # pylint: disable=line-too-long + :paramtype node_public_ip_prefix_id: str + :keyword scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the + default is 'Regular'. Known values are: "Spot" and "Regular". + :paramtype scale_set_priority: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority + :keyword scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is + 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". + :paramtype scale_set_eviction_policy: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy + :keyword spot_max_price: Possible values are any decimal value greater than zero or -1 which + indicates the willingness to pay any on-demand price. For more details on spot pricing, see + `spot VMs pricing `_. + :paramtype spot_max_price: float + :keyword tags: The tags to be persisted on the agent pool virtual machine scale set. + :paramtype tags: dict[str, str] + :keyword node_labels: The node labels to be persisted across all nodes in agent pool. + :paramtype node_labels: dict[str, str] + :keyword node_taints: The taints added to new nodes during node pool create and scale. For + example, key=value:NoSchedule. + :paramtype node_taints: list[str] + :keyword proximity_placement_group_id: The ID for Proximity Placement Group. + :paramtype proximity_placement_group_id: str + :keyword kubelet_config: The Kubelet configuration on the agent pool nodes. 
+ :paramtype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :keyword linux_os_config: The OS configuration of Linux agent nodes. + :paramtype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig + :keyword enable_encryption_at_host: This is only supported on certain VM sizes and in certain + Azure regions. For more information, see: + https://docs.microsoft.com/azure/aks/enable-host-encryption. + :paramtype enable_encryption_at_host: bool + :keyword enable_ultra_ssd: Whether to enable UltraSSD. + :paramtype enable_ultra_ssd: bool + :keyword enable_fips: See `Add a FIPS-enabled node pool + `_ + for more details. + :paramtype enable_fips: bool + :keyword gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance + profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and + "MIG7g". + :paramtype gpu_instance_profile: str or + ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :keyword creation_data: CreationData to be used to specify the source Snapshot ID if the node + pool will be created/upgraded using a snapshot. + :paramtype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :keyword capacity_reservation_group_id: AKS will associate the specified agent pool with the + Capacity Reservation Group. + :paramtype capacity_reservation_group_id: str + :keyword host_group_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + For more information see `Azure dedicated hosts + `_. + :paramtype host_group_id: str + :keyword network_profile: Network-related settings of an agent pool. + :paramtype network_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :keyword windows_profile: The Windows agent pool's specific profile. 
+ :paramtype windows_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :keyword security_profile: The security settings of an agent pool. + :paramtype security_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile + """ + super().__init__(**kwargs) + self.count = count + self.vm_size = vm_size + self.os_disk_size_gb = os_disk_size_gb + self.os_disk_type = os_disk_type + self.kubelet_disk_type = kubelet_disk_type + self.workload_runtime = workload_runtime + self.vnet_subnet_id = vnet_subnet_id + self.pod_subnet_id = pod_subnet_id + self.max_pods = max_pods + self.os_type = os_type + self.os_sku = os_sku + self.max_count = max_count + self.min_count = min_count + self.enable_auto_scaling = enable_auto_scaling + self.scale_down_mode = scale_down_mode + self.type = type + self.mode = mode + self.orchestrator_version = orchestrator_version + self.current_orchestrator_version = None + self.node_image_version = None + self.upgrade_settings = upgrade_settings + self.provisioning_state = None + self.power_state = power_state + self.availability_zones = availability_zones + self.enable_node_public_ip = enable_node_public_ip + self.node_public_ip_prefix_id = node_public_ip_prefix_id + self.scale_set_priority = scale_set_priority + self.scale_set_eviction_policy = scale_set_eviction_policy + self.spot_max_price = spot_max_price + self.tags = tags + self.node_labels = node_labels + self.node_taints = node_taints + self.proximity_placement_group_id = proximity_placement_group_id + self.kubelet_config = kubelet_config + self.linux_os_config = linux_os_config + self.enable_encryption_at_host = enable_encryption_at_host + self.enable_ultra_ssd = enable_ultra_ssd + self.enable_fips = enable_fips + self.gpu_instance_profile = gpu_instance_profile + self.creation_data = creation_data + self.capacity_reservation_group_id = capacity_reservation_group_id + self.host_group_id = host_group_id + self.network_profile = 
network_profile + self.windows_profile = windows_profile + self.security_profile = security_profile + + +class ManagedClusterAgentPoolProfile( + ManagedClusterAgentPoolProfileProperties +): # pylint: disable=too-many-instance-attributes + """Profile for the container service agent pool. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the + range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for + system pools. The default value is 1. + :vartype count: int + :ivar vm_size: VM size availability varies by region. If a node contains insufficient compute + resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted + VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :vartype vm_size: str + :ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine + in the master/agent pool. If you specify 0, it will apply the default osDisk size according to + the vmSize specified. + :vartype os_disk_size_gb: int + :ivar os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk + larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed + after creation. For more information see `Ephemeral OS + `_. Known values are: + "Managed" and "Ephemeral". + :vartype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :ivar kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data + root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". + :vartype kubelet_disk_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :ivar workload_runtime: Determines the type of workload a node can run. 
Known values are: + "OCIContainer" and "WasmWasi". + :vartype workload_runtime: str or + ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime + :ivar vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and used. + If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just + nodes. This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :vartype vnet_subnet_id: str + :ivar pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see + vnetSubnetID for more details). This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :vartype pod_subnet_id: str + :ivar max_pods: The maximum number of pods that can run on a node. + :vartype max_pods: int + :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and + "Windows". + :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is + Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", + "Windows2019", and "Windows2022". + :vartype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + :ivar max_count: The maximum number of nodes for auto-scaling. + :vartype max_count: int + :ivar min_count: The minimum number of nodes for auto-scaling. + :vartype min_count: int + :ivar enable_auto_scaling: Whether to enable auto-scaler. + :vartype enable_auto_scaling: bool + :ivar scale_down_mode: This also effects the cluster autoscaler behavior. 
If not specified, it + defaults to Delete. Known values are: "Delete" and "Deallocate". + :vartype scale_down_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode + :ivar type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets" and + "AvailabilitySet". + :vartype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType + :ivar mode: A cluster must have at least one 'System' Agent Pool at all times. For additional + information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". + :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode + :ivar orchestrator_version: Both patch version (e.g. 1.20.13) and + (e.g. 1.20) are supported. When is specified, the latest supported + GA patch version is chosen automatically. Updating the cluster with the same once + it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch + version is available. As a best practice, you should upgrade all node pools in an AKS cluster + to the same Kubernetes version. The node pool version must have the same major version as the + control plane. The node pool minor version must be within two minor versions of the control + plane version. The node pool version cannot be greater than the control plane version. For more + information see `upgrading a node pool + `_. + :vartype orchestrator_version: str + :ivar current_orchestrator_version: If orchestratorVersion is a fully specified version + , this field will be exactly equal to it. If orchestratorVersion is + , this field will contain the full version being used. + :vartype current_orchestrator_version: str + :ivar node_image_version: The version of node image. + :vartype node_image_version: str + :ivar upgrade_settings: Settings for upgrading the agentpool. 
+ :vartype upgrade_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings + :ivar provisioning_state: The current deployment or provisioning state. + :vartype provisioning_state: str + :ivar power_state: When an Agent Pool is first created it is initially Running. The Agent Pool + can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and + does not accrue billing charges. An Agent Pool can only be stopped if it is Running and + provisioning state is Succeeded. + :vartype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :ivar availability_zones: The list of Availability zones to use for nodes. This can only be + specified if the AgentPoolType property is 'VirtualMachineScaleSets'. + :vartype availability_zones: list[str] + :ivar enable_node_public_ip: Some scenarios may require nodes in a node pool to receive their + own dedicated public IP addresses. A common scenario is for gaming workloads, where a console + needs to make a direct connection to a cloud virtual machine to minimize hops. For more + information see `assigning a public IP per node + `_. + The default is false. + :vartype enable_node_public_ip: bool + :ivar node_public_ip_prefix_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. # pylint: disable=line-too-long + :vartype node_public_ip_prefix_id: str + :ivar scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default + is 'Regular'. Known values are: "Spot" and "Regular". + :vartype scale_set_priority: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority + :ivar scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is + 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". 
+ :vartype scale_set_eviction_policy: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy + :ivar spot_max_price: Possible values are any decimal value greater than zero or -1 which + indicates the willingness to pay any on-demand price. For more details on spot pricing, see + `spot VMs pricing `_. + :vartype spot_max_price: float + :ivar tags: The tags to be persisted on the agent pool virtual machine scale set. + :vartype tags: dict[str, str] + :ivar node_labels: The node labels to be persisted across all nodes in agent pool. + :vartype node_labels: dict[str, str] + :ivar node_taints: The taints added to new nodes during node pool create and scale. For + example, key=value:NoSchedule. + :vartype node_taints: list[str] + :ivar proximity_placement_group_id: The ID for Proximity Placement Group. + :vartype proximity_placement_group_id: str + :ivar kubelet_config: The Kubelet configuration on the agent pool nodes. + :vartype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :ivar linux_os_config: The OS configuration of Linux agent nodes. + :vartype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig + :ivar enable_encryption_at_host: This is only supported on certain VM sizes and in certain + Azure regions. For more information, see: + https://docs.microsoft.com/azure/aks/enable-host-encryption. + :vartype enable_encryption_at_host: bool + :ivar enable_ultra_ssd: Whether to enable UltraSSD. + :vartype enable_ultra_ssd: bool + :ivar enable_fips: See `Add a FIPS-enabled node pool + `_ + for more details. + :vartype enable_fips: bool + :ivar gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile + for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g". 
+ :vartype gpu_instance_profile: str or + ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :ivar creation_data: CreationData to be used to specify the source Snapshot ID if the node pool + will be created/upgraded using a snapshot. + :vartype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :ivar capacity_reservation_group_id: AKS will associate the specified agent pool with the + Capacity Reservation Group. + :vartype capacity_reservation_group_id: str + :ivar host_group_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + For more information see `Azure dedicated hosts + `_. + :vartype host_group_id: str + :ivar network_profile: Network-related settings of an agent pool. + :vartype network_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :ivar windows_profile: The Windows agent pool's specific profile. + :vartype windows_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :ivar security_profile: The security settings of an agent pool. + :vartype security_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile + :ivar name: Windows agent pool names must be 6 characters or less. Required. 
+ :vartype name: str + """ + + _validation = { + "os_disk_size_gb": {"maximum": 2048, "minimum": 0}, + "current_orchestrator_version": {"readonly": True}, + "node_image_version": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "name": {"required": True, "pattern": r"^[a-z][a-z0-9]{0,11}$"}, + } + + _attribute_map = { + "count": {"key": "count", "type": "int"}, + "vm_size": {"key": "vmSize", "type": "str"}, + "os_disk_size_gb": {"key": "osDiskSizeGB", "type": "int"}, + "os_disk_type": {"key": "osDiskType", "type": "str"}, + "kubelet_disk_type": {"key": "kubeletDiskType", "type": "str"}, + "workload_runtime": {"key": "workloadRuntime", "type": "str"}, + "vnet_subnet_id": {"key": "vnetSubnetID", "type": "str"}, + "pod_subnet_id": {"key": "podSubnetID", "type": "str"}, + "max_pods": {"key": "maxPods", "type": "int"}, + "os_type": {"key": "osType", "type": "str"}, + "os_sku": {"key": "osSKU", "type": "str"}, + "max_count": {"key": "maxCount", "type": "int"}, + "min_count": {"key": "minCount", "type": "int"}, + "enable_auto_scaling": {"key": "enableAutoScaling", "type": "bool"}, + "scale_down_mode": {"key": "scaleDownMode", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "mode": {"key": "mode", "type": "str"}, + "orchestrator_version": {"key": "orchestratorVersion", "type": "str"}, + "current_orchestrator_version": {"key": "currentOrchestratorVersion", "type": "str"}, + "node_image_version": {"key": "nodeImageVersion", "type": "str"}, + "upgrade_settings": {"key": "upgradeSettings", "type": "AgentPoolUpgradeSettings"}, + "provisioning_state": {"key": "provisioningState", "type": "str"}, + "power_state": {"key": "powerState", "type": "PowerState"}, + "availability_zones": {"key": "availabilityZones", "type": "[str]"}, + "enable_node_public_ip": {"key": "enableNodePublicIP", "type": "bool"}, + "node_public_ip_prefix_id": {"key": "nodePublicIPPrefixID", "type": "str"}, + "scale_set_priority": {"key": "scaleSetPriority", "type": "str"}, + 
"scale_set_eviction_policy": {"key": "scaleSetEvictionPolicy", "type": "str"}, + "spot_max_price": {"key": "spotMaxPrice", "type": "float"}, + "tags": {"key": "tags", "type": "{str}"}, + "node_labels": {"key": "nodeLabels", "type": "{str}"}, + "node_taints": {"key": "nodeTaints", "type": "[str]"}, + "proximity_placement_group_id": {"key": "proximityPlacementGroupID", "type": "str"}, + "kubelet_config": {"key": "kubeletConfig", "type": "KubeletConfig"}, + "linux_os_config": {"key": "linuxOSConfig", "type": "LinuxOSConfig"}, + "enable_encryption_at_host": {"key": "enableEncryptionAtHost", "type": "bool"}, + "enable_ultra_ssd": {"key": "enableUltraSSD", "type": "bool"}, + "enable_fips": {"key": "enableFIPS", "type": "bool"}, + "gpu_instance_profile": {"key": "gpuInstanceProfile", "type": "str"}, + "creation_data": {"key": "creationData", "type": "CreationData"}, + "capacity_reservation_group_id": {"key": "capacityReservationGroupID", "type": "str"}, + "host_group_id": {"key": "hostGroupID", "type": "str"}, + "network_profile": {"key": "networkProfile", "type": "AgentPoolNetworkProfile"}, + "windows_profile": {"key": "windowsProfile", "type": "AgentPoolWindowsProfile"}, + "security_profile": {"key": "securityProfile", "type": "AgentPoolSecurityProfile"}, + "name": {"key": "name", "type": "str"}, + } + + def __init__( # pylint: disable=too-many-locals + self, + *, + name: str, + count: Optional[int] = None, + vm_size: Optional[str] = None, + os_disk_size_gb: Optional[int] = None, + os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None, + kubelet_disk_type: Optional[Union[str, "_models.KubeletDiskType"]] = None, + workload_runtime: Optional[Union[str, "_models.WorkloadRuntime"]] = None, + vnet_subnet_id: Optional[str] = None, + pod_subnet_id: Optional[str] = None, + max_pods: Optional[int] = None, + os_type: Union[str, "_models.OSType"] = "Linux", + os_sku: Optional[Union[str, "_models.OSSKU"]] = None, + max_count: Optional[int] = None, + min_count: 
Optional[int] = None, + enable_auto_scaling: Optional[bool] = None, + scale_down_mode: Optional[Union[str, "_models.ScaleDownMode"]] = None, + type: Optional[Union[str, "_models.AgentPoolType"]] = None, + mode: Optional[Union[str, "_models.AgentPoolMode"]] = None, + orchestrator_version: Optional[str] = None, + upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None, + power_state: Optional["_models.PowerState"] = None, + availability_zones: Optional[List[str]] = None, + enable_node_public_ip: Optional[bool] = None, + node_public_ip_prefix_id: Optional[str] = None, + scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular", + scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete", + spot_max_price: float = -1, + tags: Optional[Dict[str, str]] = None, + node_labels: Optional[Dict[str, str]] = None, + node_taints: Optional[List[str]] = None, + proximity_placement_group_id: Optional[str] = None, + kubelet_config: Optional["_models.KubeletConfig"] = None, + linux_os_config: Optional["_models.LinuxOSConfig"] = None, + enable_encryption_at_host: Optional[bool] = None, + enable_ultra_ssd: Optional[bool] = None, + enable_fips: Optional[bool] = None, + gpu_instance_profile: Optional[Union[str, "_models.GPUInstanceProfile"]] = None, + creation_data: Optional["_models.CreationData"] = None, + capacity_reservation_group_id: Optional[str] = None, + host_group_id: Optional[str] = None, + network_profile: Optional["_models.AgentPoolNetworkProfile"] = None, + windows_profile: Optional["_models.AgentPoolWindowsProfile"] = None, + security_profile: Optional["_models.AgentPoolSecurityProfile"] = None, + **kwargs: Any + ) -> None: + """ + :keyword count: Number of agents (VMs) to host docker containers. Allowed values must be in the + range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for + system pools. The default value is 1. 
+ :paramtype count: int + :keyword vm_size: VM size availability varies by region. If a node contains insufficient + compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on + restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :paramtype vm_size: str + :keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every + machine in the master/agent pool. If you specify 0, it will apply the default osDisk size + according to the vmSize specified. + :paramtype os_disk_size_gb: int + :keyword os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk + larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed + after creation. For more information see `Ephemeral OS + `_. Known values are: + "Managed" and "Ephemeral". + :paramtype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :keyword kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime + data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". + :paramtype kubelet_disk_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :keyword workload_runtime: Determines the type of workload a node can run. Known values are: + "OCIContainer" and "WasmWasi". + :paramtype workload_runtime: str or + ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime + :keyword vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and + used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to + just nodes. This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. 
# pylint: disable=line-too-long + :paramtype vnet_subnet_id: str + :keyword pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see + vnetSubnetID for more details). This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :paramtype pod_subnet_id: str + :keyword max_pods: The maximum number of pods that can run on a node. + :paramtype max_pods: int + :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" + and "Windows". + :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :keyword os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType + is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", + "Windows2019", and "Windows2022". + :paramtype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + :keyword max_count: The maximum number of nodes for auto-scaling. + :paramtype max_count: int + :keyword min_count: The minimum number of nodes for auto-scaling. + :paramtype min_count: int + :keyword enable_auto_scaling: Whether to enable auto-scaler. + :paramtype enable_auto_scaling: bool + :keyword scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, + it defaults to Delete. Known values are: "Delete" and "Deallocate". + :paramtype scale_down_mode: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode + :keyword type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets" and + "AvailabilitySet". + :paramtype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType + :keyword mode: A cluster must have at least one 'System' Agent Pool at all times. 
For + additional information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". + :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode + :keyword orchestrator_version: Both patch version (e.g. 1.20.13) and + (e.g. 1.20) are supported. When is specified, the latest supported + GA patch version is chosen automatically. Updating the cluster with the same once + it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch + version is available. As a best practice, you should upgrade all node pools in an AKS cluster + to the same Kubernetes version. The node pool version must have the same major version as the + control plane. The node pool minor version must be within two minor versions of the control + plane version. The node pool version cannot be greater than the control plane version. For more + information see `upgrading a node pool + `_. + :paramtype orchestrator_version: str + :keyword upgrade_settings: Settings for upgrading the agentpool. + :paramtype upgrade_settings: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings + :keyword power_state: When an Agent Pool is first created it is initially Running. The Agent + Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs + and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and + provisioning state is Succeeded. + :paramtype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :keyword availability_zones: The list of Availability zones to use for nodes. This can only be + specified if the AgentPoolType property is 'VirtualMachineScaleSets'. + :paramtype availability_zones: list[str] + :keyword enable_node_public_ip: Some scenarios may require nodes in a node pool to receive + their own dedicated public IP addresses. 
A common scenario is for gaming workloads, where a + console needs to make a direct connection to a cloud virtual machine to minimize hops. For more + information see `assigning a public IP per node + `_. # pylint: disable=line-too-long + The default is false. + :paramtype enable_node_public_ip: bool + :keyword node_public_ip_prefix_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. # pylint: disable=line-too-long + :paramtype node_public_ip_prefix_id: str + :keyword scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the + default is 'Regular'. Known values are: "Spot" and "Regular". + :paramtype scale_set_priority: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority + :keyword scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is + 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". + :paramtype scale_set_eviction_policy: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy + :keyword spot_max_price: Possible values are any decimal value greater than zero or -1 which + indicates the willingness to pay any on-demand price. For more details on spot pricing, see + `spot VMs pricing `_. + :paramtype spot_max_price: float + :keyword tags: The tags to be persisted on the agent pool virtual machine scale set. + :paramtype tags: dict[str, str] + :keyword node_labels: The node labels to be persisted across all nodes in agent pool. + :paramtype node_labels: dict[str, str] + :keyword node_taints: The taints added to new nodes during node pool create and scale. For + example, key=value:NoSchedule. + :paramtype node_taints: list[str] + :keyword proximity_placement_group_id: The ID for Proximity Placement Group. 
+ :paramtype proximity_placement_group_id: str + :keyword kubelet_config: The Kubelet configuration on the agent pool nodes. + :paramtype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :keyword linux_os_config: The OS configuration of Linux agent nodes. + :paramtype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig + :keyword enable_encryption_at_host: This is only supported on certain VM sizes and in certain + Azure regions. For more information, see: + https://docs.microsoft.com/azure/aks/enable-host-encryption. + :paramtype enable_encryption_at_host: bool + :keyword enable_ultra_ssd: Whether to enable UltraSSD. + :paramtype enable_ultra_ssd: bool + :keyword enable_fips: See `Add a FIPS-enabled node pool + `_ + for more details. + :paramtype enable_fips: bool + :keyword gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance + profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and + "MIG7g". + :paramtype gpu_instance_profile: str or + ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :keyword creation_data: CreationData to be used to specify the source Snapshot ID if the node + pool will be created/upgraded using a snapshot. + :paramtype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :keyword capacity_reservation_group_id: AKS will associate the specified agent pool with the + Capacity Reservation Group. + :paramtype capacity_reservation_group_id: str + :keyword host_group_id: This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + For more information see `Azure dedicated hosts + `_. + :paramtype host_group_id: str + :keyword network_profile: Network-related settings of an agent pool. 
+ :paramtype network_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :keyword windows_profile: The Windows agent pool's specific profile. + :paramtype windows_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :keyword security_profile: The security settings of an agent pool. + :paramtype security_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile + :keyword name: Windows agent pool names must be 6 characters or less. Required. + :paramtype name: str + """ + super().__init__( + count=count, + vm_size=vm_size, + os_disk_size_gb=os_disk_size_gb, + os_disk_type=os_disk_type, + kubelet_disk_type=kubelet_disk_type, + workload_runtime=workload_runtime, + vnet_subnet_id=vnet_subnet_id, + pod_subnet_id=pod_subnet_id, + max_pods=max_pods, + os_type=os_type, + os_sku=os_sku, + max_count=max_count, + min_count=min_count, + enable_auto_scaling=enable_auto_scaling, + scale_down_mode=scale_down_mode, + type=type, + mode=mode, + orchestrator_version=orchestrator_version, + upgrade_settings=upgrade_settings, + power_state=power_state, + availability_zones=availability_zones, + enable_node_public_ip=enable_node_public_ip, + node_public_ip_prefix_id=node_public_ip_prefix_id, + scale_set_priority=scale_set_priority, + scale_set_eviction_policy=scale_set_eviction_policy, + spot_max_price=spot_max_price, + tags=tags, + node_labels=node_labels, + node_taints=node_taints, + proximity_placement_group_id=proximity_placement_group_id, + kubelet_config=kubelet_config, + linux_os_config=linux_os_config, + enable_encryption_at_host=enable_encryption_at_host, + enable_ultra_ssd=enable_ultra_ssd, + enable_fips=enable_fips, + gpu_instance_profile=gpu_instance_profile, + creation_data=creation_data, + capacity_reservation_group_id=capacity_reservation_group_id, + host_group_id=host_group_id, + network_profile=network_profile, + windows_profile=windows_profile, + 
security_profile=security_profile, + **kwargs + ) + self.name = name + + +class ManagedClusterAPIServerAccessProfile(_serialization.Model): + """Access profile for managed cluster API server. + + :ivar authorized_ip_ranges: IP ranges are specified in CIDR format, e.g. 137.117.106.88/29. + This feature is not compatible with clusters that use Public IP Per Node, or clusters that are + using a Basic Load Balancer. For more information see `API server authorized IP ranges + `_. + :vartype authorized_ip_ranges: list[str] + :ivar enable_private_cluster: For more details, see `Creating a private AKS cluster + `_. + :vartype enable_private_cluster: bool + :ivar private_dns_zone: The default is System. For more details see `configure private DNS zone + `_. Allowed + values are 'system' and 'none'. + :vartype private_dns_zone: str + :ivar enable_private_cluster_public_fqdn: Whether to create additional public FQDN for private + cluster or not. + :vartype enable_private_cluster_public_fqdn: bool + :ivar disable_run_command: Whether to disable run command for the cluster or not. + :vartype disable_run_command: bool + """ + + _attribute_map = { + "authorized_ip_ranges": {"key": "authorizedIPRanges", "type": "[str]"}, + "enable_private_cluster": {"key": "enablePrivateCluster", "type": "bool"}, + "private_dns_zone": {"key": "privateDNSZone", "type": "str"}, + "enable_private_cluster_public_fqdn": {"key": "enablePrivateClusterPublicFQDN", "type": "bool"}, + "disable_run_command": {"key": "disableRunCommand", "type": "bool"}, + } + + def __init__( + self, + *, + authorized_ip_ranges: Optional[List[str]] = None, + enable_private_cluster: Optional[bool] = None, + private_dns_zone: Optional[str] = None, + enable_private_cluster_public_fqdn: Optional[bool] = None, + disable_run_command: Optional[bool] = None, + **kwargs: Any + ) -> None: + """ + :keyword authorized_ip_ranges: IP ranges are specified in CIDR format, e.g. 137.117.106.88/29. 
+ This feature is not compatible with clusters that use Public IP Per Node, or clusters that are + using a Basic Load Balancer. For more information see `API server authorized IP ranges + `_. + :paramtype authorized_ip_ranges: list[str] + :keyword enable_private_cluster: For more details, see `Creating a private AKS cluster + `_. + :paramtype enable_private_cluster: bool + :keyword private_dns_zone: The default is System. For more details see `configure private DNS + zone `_. + Allowed values are 'system' and 'none'. + :paramtype private_dns_zone: str + :keyword enable_private_cluster_public_fqdn: Whether to create additional public FQDN for + private cluster or not. + :paramtype enable_private_cluster_public_fqdn: bool + :keyword disable_run_command: Whether to disable run command for the cluster or not. + :paramtype disable_run_command: bool + """ + super().__init__(**kwargs) + self.authorized_ip_ranges = authorized_ip_ranges + self.enable_private_cluster = enable_private_cluster + self.private_dns_zone = private_dns_zone + self.enable_private_cluster_public_fqdn = enable_private_cluster_public_fqdn + self.disable_run_command = disable_run_command + + +class ManagedClusterAutoUpgradeProfile(_serialization.Model): + """Auto upgrade profile for a managed cluster. + + :ivar upgrade_channel: For more information see `setting the AKS cluster auto-upgrade channel + `_. Known values + are: "rapid", "stable", "patch", "node-image", and "none". + :vartype upgrade_channel: str or ~azure.mgmt.containerservice.v2024_07_01.models.UpgradeChannel + :ivar node_os_upgrade_channel: Manner in which the OS on your nodes is updated. The default is + NodeImage. Known values are: "None", "Unmanaged", "NodeImage", and "SecurityPatch". 
+ :vartype node_os_upgrade_channel: str or + ~azure.mgmt.containerservice.v2024_07_01.models.NodeOSUpgradeChannel + """ + + _attribute_map = { + "upgrade_channel": {"key": "upgradeChannel", "type": "str"}, + "node_os_upgrade_channel": {"key": "nodeOSUpgradeChannel", "type": "str"}, + } + + def __init__( + self, + *, + upgrade_channel: Optional[Union[str, "_models.UpgradeChannel"]] = None, + node_os_upgrade_channel: Optional[Union[str, "_models.NodeOSUpgradeChannel"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword upgrade_channel: For more information see `setting the AKS cluster auto-upgrade + channel `_. + Known values are: "rapid", "stable", "patch", "node-image", and "none". + :paramtype upgrade_channel: str or + ~azure.mgmt.containerservice.v2024_07_01.models.UpgradeChannel + :keyword node_os_upgrade_channel: Manner in which the OS on your nodes is updated. The default + is NodeImage. Known values are: "None", "Unmanaged", "NodeImage", and "SecurityPatch". + :paramtype node_os_upgrade_channel: str or + ~azure.mgmt.containerservice.v2024_07_01.models.NodeOSUpgradeChannel + """ + super().__init__(**kwargs) + self.upgrade_channel = upgrade_channel + self.node_os_upgrade_channel = node_os_upgrade_channel + + +class ManagedClusterAzureMonitorProfile(_serialization.Model): + """Azure Monitor addon profiles for monitoring the managed cluster. + + :ivar metrics: Metrics profile for the Azure Monitor managed service for Prometheus addon. + Collect out-of-the-box Kubernetes infrastructure metrics to send to an Azure Monitor Workspace + and configure additional scraping for custom targets. See aka.ms/AzureManagedPrometheus for an + overview. 
+ :vartype metrics: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfileMetrics + """ + + _attribute_map = { + "metrics": {"key": "metrics", "type": "ManagedClusterAzureMonitorProfileMetrics"}, + } + + def __init__( + self, *, metrics: Optional["_models.ManagedClusterAzureMonitorProfileMetrics"] = None, **kwargs: Any + ) -> None: + """ + :keyword metrics: Metrics profile for the Azure Monitor managed service for Prometheus addon. + Collect out-of-the-box Kubernetes infrastructure metrics to send to an Azure Monitor Workspace + and configure additional scraping for custom targets. See aka.ms/AzureManagedPrometheus for an + overview. + :paramtype metrics: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfileMetrics + """ + super().__init__(**kwargs) + self.metrics = metrics + + +class ManagedClusterAzureMonitorProfileKubeStateMetrics(_serialization.Model): # pylint: disable=name-too-long + """Kube State Metrics profile for the Azure Managed Prometheus addon. These optional settings are + for the kube-state-metrics pod that is deployed with the addon. See + aka.ms/AzureManagedPrometheus-optional-parameters for details. + + :ivar metric_labels_allowlist: Comma-separated list of additional Kubernetes label keys that + will be used in the resource's labels metric (Example: + 'namespaces=[k8s-label-1,k8s-label-n,...],pods=[app],...'). By default the metric contains only + resource name and namespace labels. + :vartype metric_labels_allowlist: str + :ivar metric_annotations_allow_list: Comma-separated list of Kubernetes annotation keys that + will be used in the resource's labels metric (Example: + 'namespaces=[kubernetes.io/team,...],pods=[kubernetes.io/team],...'). By default the metric + contains only resource name and namespace labels. 
+ :vartype metric_annotations_allow_list: str + """ + + _attribute_map = { + "metric_labels_allowlist": {"key": "metricLabelsAllowlist", "type": "str"}, + "metric_annotations_allow_list": {"key": "metricAnnotationsAllowList", "type": "str"}, + } + + def __init__( + self, + *, + metric_labels_allowlist: Optional[str] = None, + metric_annotations_allow_list: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword metric_labels_allowlist: Comma-separated list of additional Kubernetes label keys that + will be used in the resource's labels metric (Example: + 'namespaces=[k8s-label-1,k8s-label-n,...],pods=[app],...'). By default the metric contains only + resource name and namespace labels. + :paramtype metric_labels_allowlist: str + :keyword metric_annotations_allow_list: Comma-separated list of Kubernetes annotation keys that + will be used in the resource's labels metric (Example: + 'namespaces=[kubernetes.io/team,...],pods=[kubernetes.io/team],...'). By default the metric + contains only resource name and namespace labels. + :paramtype metric_annotations_allow_list: str + """ + super().__init__(**kwargs) + self.metric_labels_allowlist = metric_labels_allowlist + self.metric_annotations_allow_list = metric_annotations_allow_list + + +class ManagedClusterAzureMonitorProfileMetrics(_serialization.Model): + """Metrics profile for the Azure Monitor managed service for Prometheus addon. Collect + out-of-the-box Kubernetes infrastructure metrics to send to an Azure Monitor Workspace and + configure additional scraping for custom targets. See aka.ms/AzureManagedPrometheus for an + overview. + + All required parameters must be populated in order to send to server. + + :ivar enabled: Whether to enable or disable the Azure Managed Prometheus addon for Prometheus + monitoring. See aka.ms/AzureManagedPrometheus-aks-enable for details on enabling and disabling. + Required. 
+ :vartype enabled: bool + :ivar kube_state_metrics: Kube State Metrics profile for the Azure Managed Prometheus addon. + These optional settings are for the kube-state-metrics pod that is deployed with the addon. See + aka.ms/AzureManagedPrometheus-optional-parameters for details. + :vartype kube_state_metrics: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfileKubeStateMetrics + """ + + _validation = { + "enabled": {"required": True}, + } + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + "kube_state_metrics": {"key": "kubeStateMetrics", "type": "ManagedClusterAzureMonitorProfileKubeStateMetrics"}, + } + + def __init__( + self, + *, + enabled: bool, + kube_state_metrics: Optional["_models.ManagedClusterAzureMonitorProfileKubeStateMetrics"] = None, + **kwargs: Any + ) -> None: + """ + :keyword enabled: Whether to enable or disable the Azure Managed Prometheus addon for + Prometheus monitoring. See aka.ms/AzureManagedPrometheus-aks-enable for details on enabling and + disabling. Required. + :paramtype enabled: bool + :keyword kube_state_metrics: Kube State Metrics profile for the Azure Managed Prometheus addon. + These optional settings are for the kube-state-metrics pod that is deployed with the addon. See + aka.ms/AzureManagedPrometheus-optional-parameters for details. + :paramtype kube_state_metrics: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfileKubeStateMetrics + """ + super().__init__(**kwargs) + self.enabled = enabled + self.kube_state_metrics = kube_state_metrics + + +class ManagedClusterCostAnalysis(_serialization.Model): + """The cost analysis configuration for the cluster. + + :ivar enabled: The Managed Cluster sku.tier must be set to 'Standard' or 'Premium' to enable + this feature. Enabling this will add Kubernetes Namespace and Deployment details to the Cost + Analysis views in the Azure portal. If not specified, the default is false. 
For more + information see aka.ms/aks/docs/cost-analysis. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: The Managed Cluster sku.tier must be set to 'Standard' or 'Premium' to enable + this feature. Enabling this will add Kubernetes Namespace and Deployment details to the Cost + Analysis views in the Azure portal. If not specified, the default is false. For more + information see aka.ms/aks/docs/cost-analysis. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedClusterHTTPProxyConfig(_serialization.Model): + """Cluster HTTP proxy configuration. + + :ivar http_proxy: The HTTP proxy server endpoint to use. + :vartype http_proxy: str + :ivar https_proxy: The HTTPS proxy server endpoint to use. + :vartype https_proxy: str + :ivar no_proxy: The endpoints that should not go through proxy. + :vartype no_proxy: list[str] + :ivar trusted_ca: Alternative CA cert to use for connecting to proxy servers. + :vartype trusted_ca: str + """ + + _attribute_map = { + "http_proxy": {"key": "httpProxy", "type": "str"}, + "https_proxy": {"key": "httpsProxy", "type": "str"}, + "no_proxy": {"key": "noProxy", "type": "[str]"}, + "trusted_ca": {"key": "trustedCa", "type": "str"}, + } + + def __init__( + self, + *, + http_proxy: Optional[str] = None, + https_proxy: Optional[str] = None, + no_proxy: Optional[List[str]] = None, + trusted_ca: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword http_proxy: The HTTP proxy server endpoint to use. + :paramtype http_proxy: str + :keyword https_proxy: The HTTPS proxy server endpoint to use. + :paramtype https_proxy: str + :keyword no_proxy: The endpoints that should not go through proxy. + :paramtype no_proxy: list[str] + :keyword trusted_ca: Alternative CA cert to use for connecting to proxy servers. 
+ :paramtype trusted_ca: str + """ + super().__init__(**kwargs) + self.http_proxy = http_proxy + self.https_proxy = https_proxy + self.no_proxy = no_proxy + self.trusted_ca = trusted_ca + + +class ManagedClusterIdentity(_serialization.Model): + """Identity for the managed cluster. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar principal_id: The principal id of the system assigned identity which is used by master + components. + :vartype principal_id: str + :ivar tenant_id: The tenant id of the system assigned identity which is used by master + components. + :vartype tenant_id: str + :ivar type: For more information see `use managed identities in AKS + `_. Known values are: + "SystemAssigned", "UserAssigned", and "None". + :vartype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.ResourceIdentityType + :ivar delegated_resources: The delegated identity resources assigned to this managed cluster. + This can only be set by another Azure Resource Provider, and managed cluster only accept one + delegated identity resource. Internal use only. + :vartype delegated_resources: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.DelegatedResource] + :ivar user_assigned_identities: The keys must be ARM resource IDs in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
# pylint: disable=line-too-long + :vartype user_assigned_identities: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedServiceIdentityUserAssignedIdentitiesValue] + """ + + _validation = { + "principal_id": {"readonly": True}, + "tenant_id": {"readonly": True}, + } + + _attribute_map = { + "principal_id": {"key": "principalId", "type": "str"}, + "tenant_id": {"key": "tenantId", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "delegated_resources": {"key": "delegatedResources", "type": "{DelegatedResource}"}, + "user_assigned_identities": { + "key": "userAssignedIdentities", + "type": "{ManagedServiceIdentityUserAssignedIdentitiesValue}", + }, + } + + def __init__( + self, + *, + type: Optional[Union[str, "_models.ResourceIdentityType"]] = None, + delegated_resources: Optional[Dict[str, "_models.DelegatedResource"]] = None, + user_assigned_identities: Optional[ + Dict[str, "_models.ManagedServiceIdentityUserAssignedIdentitiesValue"] + ] = None, + **kwargs: Any + ) -> None: + """ + :keyword type: For more information see `use managed identities in AKS + `_. Known values are: + "SystemAssigned", "UserAssigned", and "None". + :paramtype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.ResourceIdentityType + :keyword delegated_resources: The delegated identity resources assigned to this managed + cluster. This can only be set by another Azure Resource Provider, and managed cluster only + accept one delegated identity resource. Internal use only. + :paramtype delegated_resources: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.DelegatedResource] + :keyword user_assigned_identities: The keys must be ARM resource IDs in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
# pylint: disable=line-too-long + :paramtype user_assigned_identities: dict[str, + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedServiceIdentityUserAssignedIdentitiesValue] + """ + super().__init__(**kwargs) + self.principal_id = None + self.tenant_id = None + self.type = type + self.delegated_resources = delegated_resources + self.user_assigned_identities = user_assigned_identities + + +class ManagedClusterIngressProfile(_serialization.Model): + """Ingress profile for the container service cluster. + + :ivar web_app_routing: App Routing settings for the ingress profile. You can find an overview + and onboarding guide for this feature at + https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default. + :vartype web_app_routing: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIngressProfileWebAppRouting + """ + + _attribute_map = { + "web_app_routing": {"key": "webAppRouting", "type": "ManagedClusterIngressProfileWebAppRouting"}, + } + + def __init__( + self, *, web_app_routing: Optional["_models.ManagedClusterIngressProfileWebAppRouting"] = None, **kwargs: Any + ) -> None: + """ + :keyword web_app_routing: App Routing settings for the ingress profile. You can find an + overview and onboarding guide for this feature at + https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default. + :paramtype web_app_routing: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIngressProfileWebAppRouting + """ + super().__init__(**kwargs) + self.web_app_routing = web_app_routing + + +class ManagedClusterIngressProfileWebAppRouting(_serialization.Model): # pylint: disable=name-too-long + """Application Routing add-on settings for the ingress profile. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar enabled: Whether to enable the Application Routing add-on. 
+ :vartype enabled: bool + :ivar dns_zone_resource_ids: Resource IDs of the DNS zones to be associated with the + Application Routing add-on. Used only when Application Routing add-on is enabled. Public and + private DNS zones can be in different resource groups, but all public DNS zones must be in the + same resource group and all private DNS zones must be in the same resource group. + :vartype dns_zone_resource_ids: list[str] + :ivar identity: Managed identity of the Application Routing add-on. This is the identity that + should be granted permissions, for example, to manage the associated Azure DNS resource and get + certificates from Azure Key Vault. See `this overview of the add-on + `_ for more + instructions. + :vartype identity: ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity + """ + + _validation = { + "dns_zone_resource_ids": {"max_items": 5, "min_items": 0}, + "identity": {"readonly": True}, + } + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + "dns_zone_resource_ids": {"key": "dnsZoneResourceIds", "type": "[str]"}, + "identity": {"key": "identity", "type": "UserAssignedIdentity"}, + } + + def __init__( + self, *, enabled: Optional[bool] = None, dns_zone_resource_ids: Optional[List[str]] = None, **kwargs: Any + ) -> None: + """ + :keyword enabled: Whether to enable the Application Routing add-on. + :paramtype enabled: bool + :keyword dns_zone_resource_ids: Resource IDs of the DNS zones to be associated with the + Application Routing add-on. Used only when Application Routing add-on is enabled. Public and + private DNS zones can be in different resource groups, but all public DNS zones must be in the + same resource group and all private DNS zones must be in the same resource group. 
+ :paramtype dns_zone_resource_ids: list[str] + """ + super().__init__(**kwargs) + self.enabled = enabled + self.dns_zone_resource_ids = dns_zone_resource_ids + self.identity = None + + +class ManagedClusterListResult(_serialization.Model): + """The response from the List Managed Clusters operation. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: The list of managed clusters. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :ivar next_link: The URL to get the next set of managed cluster results. + :vartype next_link: str + """ + + _validation = { + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[ManagedCluster]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: Optional[List["_models.ManagedCluster"]] = None, **kwargs: Any) -> None: + """ + :keyword value: The list of managed clusters. + :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + """ + super().__init__(**kwargs) + self.value = value + self.next_link = None + + +class ManagedClusterLoadBalancerProfile(_serialization.Model): + """Profile of the managed cluster load balancer. + + :ivar managed_outbound_i_ps: Desired managed outbound IPs for the cluster load balancer. + :vartype managed_outbound_i_ps: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs + :ivar outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load balancer. + :vartype outbound_ip_prefixes: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes + :ivar outbound_i_ps: Desired outbound IP resources for the cluster load balancer. 
+ :vartype outbound_i_ps: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPs + :ivar effective_outbound_i_ps: The effective outbound IP resources of the cluster load + balancer. + :vartype effective_outbound_i_ps: + list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :ivar allocated_outbound_ports: The desired number of allocated SNAT ports per VM. Allowed + values are in the range of 0 to 64000 (inclusive). The default value is 0 which results in + Azure dynamically allocating ports. + :vartype allocated_outbound_ports: int + :ivar idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values + are in the range of 4 to 120 (inclusive). The default value is 30 minutes. + :vartype idle_timeout_in_minutes: int + :ivar enable_multiple_standard_load_balancers: Enable multiple standard load balancers per AKS + cluster or not. + :vartype enable_multiple_standard_load_balancers: bool + :ivar backend_pool_type: The type of the managed inbound Load Balancer BackendPool. Known + values are: "NodeIPConfiguration" and "NodeIP". 
+ :vartype backend_pool_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.BackendPoolType + """ + + _validation = { + "allocated_outbound_ports": {"maximum": 64000, "minimum": 0}, + "idle_timeout_in_minutes": {"maximum": 120, "minimum": 4}, + } + + _attribute_map = { + "managed_outbound_i_ps": { + "key": "managedOutboundIPs", + "type": "ManagedClusterLoadBalancerProfileManagedOutboundIPs", + }, + "outbound_ip_prefixes": { + "key": "outboundIPPrefixes", + "type": "ManagedClusterLoadBalancerProfileOutboundIPPrefixes", + }, + "outbound_i_ps": {"key": "outboundIPs", "type": "ManagedClusterLoadBalancerProfileOutboundIPs"}, + "effective_outbound_i_ps": {"key": "effectiveOutboundIPs", "type": "[ResourceReference]"}, + "allocated_outbound_ports": {"key": "allocatedOutboundPorts", "type": "int"}, + "idle_timeout_in_minutes": {"key": "idleTimeoutInMinutes", "type": "int"}, + "enable_multiple_standard_load_balancers": {"key": "enableMultipleStandardLoadBalancers", "type": "bool"}, + "backend_pool_type": {"key": "backendPoolType", "type": "str"}, + } + + def __init__( + self, + *, + managed_outbound_i_ps: Optional["_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs"] = None, + outbound_ip_prefixes: Optional["_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes"] = None, + outbound_i_ps: Optional["_models.ManagedClusterLoadBalancerProfileOutboundIPs"] = None, + effective_outbound_i_ps: Optional[List["_models.ResourceReference"]] = None, + allocated_outbound_ports: int = 0, + idle_timeout_in_minutes: int = 30, + enable_multiple_standard_load_balancers: Optional[bool] = None, + backend_pool_type: Union[str, "_models.BackendPoolType"] = "NodeIPConfiguration", + **kwargs: Any + ) -> None: + """ + :keyword managed_outbound_i_ps: Desired managed outbound IPs for the cluster load balancer. 
+ :paramtype managed_outbound_i_ps: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs + :keyword outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load + balancer. + :paramtype outbound_ip_prefixes: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes + :keyword outbound_i_ps: Desired outbound IP resources for the cluster load balancer. + :paramtype outbound_i_ps: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPs + :keyword effective_outbound_i_ps: The effective outbound IP resources of the cluster load + balancer. + :paramtype effective_outbound_i_ps: + list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :keyword allocated_outbound_ports: The desired number of allocated SNAT ports per VM. Allowed + values are in the range of 0 to 64000 (inclusive). The default value is 0 which results in + Azure dynamically allocating ports. + :paramtype allocated_outbound_ports: int + :keyword idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values + are in the range of 4 to 120 (inclusive). The default value is 30 minutes. + :paramtype idle_timeout_in_minutes: int + :keyword enable_multiple_standard_load_balancers: Enable multiple standard load balancers per + AKS cluster or not. + :paramtype enable_multiple_standard_load_balancers: bool + :keyword backend_pool_type: The type of the managed inbound Load Balancer BackendPool. Known + values are: "NodeIPConfiguration" and "NodeIP". 
+ :paramtype backend_pool_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.BackendPoolType + """ + super().__init__(**kwargs) + self.managed_outbound_i_ps = managed_outbound_i_ps + self.outbound_ip_prefixes = outbound_ip_prefixes + self.outbound_i_ps = outbound_i_ps + self.effective_outbound_i_ps = effective_outbound_i_ps + self.allocated_outbound_ports = allocated_outbound_ports + self.idle_timeout_in_minutes = idle_timeout_in_minutes + self.enable_multiple_standard_load_balancers = enable_multiple_standard_load_balancers + self.backend_pool_type = backend_pool_type + + +class ManagedClusterLoadBalancerProfileManagedOutboundIPs(_serialization.Model): # pylint: disable=name-too-long + """Desired managed outbound IPs for the cluster load balancer. + + :ivar count: The desired number of IPv4 outbound IPs created/managed by Azure for the cluster + load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value + is 1. + :vartype count: int + :ivar count_ipv6: The desired number of IPv6 outbound IPs created/managed by Azure for the + cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default + value is 0 for single-stack and 1 for dual-stack. + :vartype count_ipv6: int + """ + + _validation = { + "count": {"maximum": 100, "minimum": 1}, + "count_ipv6": {"maximum": 100, "minimum": 0}, + } + + _attribute_map = { + "count": {"key": "count", "type": "int"}, + "count_ipv6": {"key": "countIPv6", "type": "int"}, + } + + def __init__(self, *, count: int = 1, count_ipv6: int = 0, **kwargs: Any) -> None: + """ + :keyword count: The desired number of IPv4 outbound IPs created/managed by Azure for the + cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default + value is 1. + :paramtype count: int + :keyword count_ipv6: The desired number of IPv6 outbound IPs created/managed by Azure for the + cluster load balancer. 
Allowed values must be in the range of 1 to 100 (inclusive). The default + value is 0 for single-stack and 1 for dual-stack. + :paramtype count_ipv6: int + """ + super().__init__(**kwargs) + self.count = count + self.count_ipv6 = count_ipv6 + + +class ManagedClusterLoadBalancerProfileOutboundIPPrefixes(_serialization.Model): # pylint: disable=name-too-long + """Desired outbound IP Prefix resources for the cluster load balancer. + + :ivar public_ip_prefixes: A list of public IP prefix resources. + :vartype public_ip_prefixes: + list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + """ + + _attribute_map = { + "public_ip_prefixes": {"key": "publicIPPrefixes", "type": "[ResourceReference]"}, + } + + def __init__( + self, *, public_ip_prefixes: Optional[List["_models.ResourceReference"]] = None, **kwargs: Any + ) -> None: + """ + :keyword public_ip_prefixes: A list of public IP prefix resources. + :paramtype public_ip_prefixes: + list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + """ + super().__init__(**kwargs) + self.public_ip_prefixes = public_ip_prefixes + + +class ManagedClusterLoadBalancerProfileOutboundIPs(_serialization.Model): # pylint: disable=name-too-long + """Desired outbound IP resources for the cluster load balancer. + + :ivar public_i_ps: A list of public IP resources. + :vartype public_i_ps: list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + """ + + _attribute_map = { + "public_i_ps": {"key": "publicIPs", "type": "[ResourceReference]"}, + } + + def __init__(self, *, public_i_ps: Optional[List["_models.ResourceReference"]] = None, **kwargs: Any) -> None: + """ + :keyword public_i_ps: A list of public IP resources. 
+ :paramtype public_i_ps: list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + """ + super().__init__(**kwargs) + self.public_i_ps = public_i_ps + + +class ManagedClusterManagedOutboundIPProfile(_serialization.Model): + """Profile of the managed outbound IP resources of the managed cluster. + + :ivar count: The desired number of outbound IPs created/managed by Azure. Allowed values must + be in the range of 1 to 16 (inclusive). The default value is 1. + :vartype count: int + """ + + _validation = { + "count": {"maximum": 16, "minimum": 1}, + } + + _attribute_map = { + "count": {"key": "count", "type": "int"}, + } + + def __init__(self, *, count: int = 1, **kwargs: Any) -> None: + """ + :keyword count: The desired number of outbound IPs created/managed by Azure. Allowed values + must be in the range of 1 to 16 (inclusive). The default value is 1. + :paramtype count: int + """ + super().__init__(**kwargs) + self.count = count + + +class ManagedClusterMetricsProfile(_serialization.Model): + """The metrics profile for the ManagedCluster. + + :ivar cost_analysis: The cost analysis configuration for the cluster. + :vartype cost_analysis: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterCostAnalysis + """ + + _attribute_map = { + "cost_analysis": {"key": "costAnalysis", "type": "ManagedClusterCostAnalysis"}, + } + + def __init__(self, *, cost_analysis: Optional["_models.ManagedClusterCostAnalysis"] = None, **kwargs: Any) -> None: + """ + :keyword cost_analysis: The cost analysis configuration for the cluster. + :paramtype cost_analysis: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterCostAnalysis + """ + super().__init__(**kwargs) + self.cost_analysis = cost_analysis + + +class ManagedClusterNATGatewayProfile(_serialization.Model): + """Profile of the managed cluster NAT gateway. + + :ivar managed_outbound_ip_profile: Profile of the managed outbound IP resources of the cluster + NAT gateway. 
+ :vartype managed_outbound_ip_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterManagedOutboundIPProfile + :ivar effective_outbound_i_ps: The effective outbound IP resources of the cluster NAT gateway. + :vartype effective_outbound_i_ps: + list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :ivar idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values + are in the range of 4 to 120 (inclusive). The default value is 4 minutes. + :vartype idle_timeout_in_minutes: int + """ + + _validation = { + "idle_timeout_in_minutes": {"maximum": 120, "minimum": 4}, + } + + _attribute_map = { + "managed_outbound_ip_profile": { + "key": "managedOutboundIPProfile", + "type": "ManagedClusterManagedOutboundIPProfile", + }, + "effective_outbound_i_ps": {"key": "effectiveOutboundIPs", "type": "[ResourceReference]"}, + "idle_timeout_in_minutes": {"key": "idleTimeoutInMinutes", "type": "int"}, + } + + def __init__( + self, + *, + managed_outbound_ip_profile: Optional["_models.ManagedClusterManagedOutboundIPProfile"] = None, + effective_outbound_i_ps: Optional[List["_models.ResourceReference"]] = None, + idle_timeout_in_minutes: int = 4, + **kwargs: Any + ) -> None: + """ + :keyword managed_outbound_ip_profile: Profile of the managed outbound IP resources of the + cluster NAT gateway. + :paramtype managed_outbound_ip_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterManagedOutboundIPProfile + :keyword effective_outbound_i_ps: The effective outbound IP resources of the cluster NAT + gateway. + :paramtype effective_outbound_i_ps: + list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :keyword idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values + are in the range of 4 to 120 (inclusive). The default value is 4 minutes. 
+ :paramtype idle_timeout_in_minutes: int + """ + super().__init__(**kwargs) + self.managed_outbound_ip_profile = managed_outbound_ip_profile + self.effective_outbound_i_ps = effective_outbound_i_ps + self.idle_timeout_in_minutes = idle_timeout_in_minutes + + +class ManagedClusterOIDCIssuerProfile(_serialization.Model): + """The OIDC issuer profile of the Managed Cluster. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar issuer_url: The OIDC issuer url of the Managed Cluster. + :vartype issuer_url: str + :ivar enabled: Whether the OIDC issuer is enabled. + :vartype enabled: bool + """ + + _validation = { + "issuer_url": {"readonly": True}, + } + + _attribute_map = { + "issuer_url": {"key": "issuerURL", "type": "str"}, + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether the OIDC issuer is enabled. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.issuer_url = None + self.enabled = enabled + + +class ManagedClusterPodIdentity(_serialization.Model): + """Details about the pod identity assigned to the Managed Cluster. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar name: The name of the pod identity. Required. + :vartype name: str + :ivar namespace: The namespace of the pod identity. Required. + :vartype namespace: str + :ivar binding_selector: The binding selector to use for the AzureIdentityBinding resource. + :vartype binding_selector: str + :ivar identity: The user assigned identity details. Required. + :vartype identity: ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity + :ivar provisioning_state: The current provisioning state of the pod identity. 
Known values are: + "Assigned", "Canceled", "Deleting", "Failed", "Succeeded", and "Updating". + :vartype provisioning_state: str or + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningState + :ivar provisioning_info: + :vartype provisioning_info: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningInfo + """ + + _validation = { + "name": {"required": True}, + "namespace": {"required": True}, + "identity": {"required": True}, + "provisioning_state": {"readonly": True}, + "provisioning_info": {"readonly": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "namespace": {"key": "namespace", "type": "str"}, + "binding_selector": {"key": "bindingSelector", "type": "str"}, + "identity": {"key": "identity", "type": "UserAssignedIdentity"}, + "provisioning_state": {"key": "provisioningState", "type": "str"}, + "provisioning_info": {"key": "provisioningInfo", "type": "ManagedClusterPodIdentityProvisioningInfo"}, + } + + def __init__( + self, + *, + name: str, + namespace: str, + identity: "_models.UserAssignedIdentity", + binding_selector: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: The name of the pod identity. Required. + :paramtype name: str + :keyword namespace: The namespace of the pod identity. Required. + :paramtype namespace: str + :keyword binding_selector: The binding selector to use for the AzureIdentityBinding resource. + :paramtype binding_selector: str + :keyword identity: The user assigned identity details. Required. 
+ :paramtype identity: ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity + """ + super().__init__(**kwargs) + self.name = name + self.namespace = namespace + self.binding_selector = binding_selector + self.identity = identity + self.provisioning_state = None + self.provisioning_info = None + + +class ManagedClusterPodIdentityException(_serialization.Model): + """See `disable AAD Pod Identity for a specific Pod/Application + `_ for more + details. + + All required parameters must be populated in order to send to server. + + :ivar name: The name of the pod identity exception. Required. + :vartype name: str + :ivar namespace: The namespace of the pod identity exception. Required. + :vartype namespace: str + :ivar pod_labels: The pod labels to match. Required. + :vartype pod_labels: dict[str, str] + """ + + _validation = { + "name": {"required": True}, + "namespace": {"required": True}, + "pod_labels": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "namespace": {"key": "namespace", "type": "str"}, + "pod_labels": {"key": "podLabels", "type": "{str}"}, + } + + def __init__(self, *, name: str, namespace: str, pod_labels: Dict[str, str], **kwargs: Any) -> None: + """ + :keyword name: The name of the pod identity exception. Required. + :paramtype name: str + :keyword namespace: The namespace of the pod identity exception. Required. + :paramtype namespace: str + :keyword pod_labels: The pod labels to match. Required. + :paramtype pod_labels: dict[str, str] + """ + super().__init__(**kwargs) + self.name = name + self.namespace = namespace + self.pod_labels = pod_labels + + +class ManagedClusterPodIdentityProfile(_serialization.Model): + """See `use AAD pod identity `_ + for more details on pod identity integration. + + :ivar enabled: Whether the pod identity addon is enabled. 
+ :vartype enabled: bool + :ivar allow_network_plugin_kubenet: Running in Kubenet is disabled by default due to the + security related nature of AAD Pod Identity and the risks of IP spoofing. See `using Kubenet + network plugin with AAD Pod Identity + `_ # pylint: disable=line-too-long + for more information. + :vartype allow_network_plugin_kubenet: bool + :ivar user_assigned_identities: The pod identities to use in the cluster. + :vartype user_assigned_identities: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentity] + :ivar user_assigned_identity_exceptions: The pod identity exceptions to allow. + :vartype user_assigned_identity_exceptions: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityException] + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + "allow_network_plugin_kubenet": {"key": "allowNetworkPluginKubenet", "type": "bool"}, + "user_assigned_identities": {"key": "userAssignedIdentities", "type": "[ManagedClusterPodIdentity]"}, + "user_assigned_identity_exceptions": { + "key": "userAssignedIdentityExceptions", + "type": "[ManagedClusterPodIdentityException]", + }, + } + + def __init__( + self, + *, + enabled: Optional[bool] = None, + allow_network_plugin_kubenet: Optional[bool] = None, + user_assigned_identities: Optional[List["_models.ManagedClusterPodIdentity"]] = None, + user_assigned_identity_exceptions: Optional[List["_models.ManagedClusterPodIdentityException"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword enabled: Whether the pod identity addon is enabled. + :paramtype enabled: bool + :keyword allow_network_plugin_kubenet: Running in Kubenet is disabled by default due to the + security related nature of AAD Pod Identity and the risks of IP spoofing. See `using Kubenet + network plugin with AAD Pod Identity + `_ # pylint: disable=line-too-long + for more information. 
+ :paramtype allow_network_plugin_kubenet: bool + :keyword user_assigned_identities: The pod identities to use in the cluster. + :paramtype user_assigned_identities: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentity] + :keyword user_assigned_identity_exceptions: The pod identity exceptions to allow. + :paramtype user_assigned_identity_exceptions: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityException] + """ + super().__init__(**kwargs) + self.enabled = enabled + self.allow_network_plugin_kubenet = allow_network_plugin_kubenet + self.user_assigned_identities = user_assigned_identities + self.user_assigned_identity_exceptions = user_assigned_identity_exceptions + + +class ManagedClusterPodIdentityProvisioningError(_serialization.Model): # pylint: disable=name-too-long + """An error response from the pod identity provisioning. + + :ivar error: Details about the error. + :vartype error: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningErrorBody + """ + + _attribute_map = { + "error": {"key": "error", "type": "ManagedClusterPodIdentityProvisioningErrorBody"}, + } + + def __init__( + self, *, error: Optional["_models.ManagedClusterPodIdentityProvisioningErrorBody"] = None, **kwargs: Any + ) -> None: + """ + :keyword error: Details about the error. + :paramtype error: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningErrorBody + """ + super().__init__(**kwargs) + self.error = error + + +class ManagedClusterPodIdentityProvisioningErrorBody(_serialization.Model): # pylint: disable=name-too-long + """An error response from the pod identity provisioning. + + :ivar code: An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. + :vartype code: str + :ivar message: A message describing the error, intended to be suitable for display in a user + interface. 
+ :vartype message: str + :ivar target: The target of the particular error. For example, the name of the property in + error. + :vartype target: str + :ivar details: A list of additional details about the error. + :vartype details: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningErrorBody] + """ + + _attribute_map = { + "code": {"key": "code", "type": "str"}, + "message": {"key": "message", "type": "str"}, + "target": {"key": "target", "type": "str"}, + "details": {"key": "details", "type": "[ManagedClusterPodIdentityProvisioningErrorBody]"}, + } + + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + target: Optional[str] = None, + details: Optional[List["_models.ManagedClusterPodIdentityProvisioningErrorBody"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword code: An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. + :paramtype code: str + :keyword message: A message describing the error, intended to be suitable for display in a user + interface. + :paramtype message: str + :keyword target: The target of the particular error. For example, the name of the property in + error. + :paramtype target: str + :keyword details: A list of additional details about the error. + :paramtype details: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningErrorBody] + """ + super().__init__(**kwargs) + self.code = code + self.message = message + self.target = target + self.details = details + + +class ManagedClusterPodIdentityProvisioningInfo(_serialization.Model): # pylint: disable=name-too-long + """ManagedClusterPodIdentityProvisioningInfo. + + :ivar error: Pod identity assignment error (if any). 
+ :vartype error: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningError + """ + + _attribute_map = { + "error": {"key": "error", "type": "ManagedClusterPodIdentityProvisioningError"}, + } + + def __init__( + self, *, error: Optional["_models.ManagedClusterPodIdentityProvisioningError"] = None, **kwargs: Any + ) -> None: + """ + :keyword error: Pod identity assignment error (if any). + :paramtype error: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningError + """ + super().__init__(**kwargs) + self.error = error + + +class ManagedClusterPoolUpgradeProfile(_serialization.Model): + """The list of available upgrade versions. + + All required parameters must be populated in order to send to server. + + :ivar kubernetes_version: The Kubernetes version (major.minor.patch). Required. + :vartype kubernetes_version: str + :ivar name: The Agent Pool name. + :vartype name: str + :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and + "Windows". + :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :ivar upgrades: List of orchestrator types and versions available for upgrade. 
+ :vartype upgrades: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfileUpgradesItem] + """ + + _validation = { + "kubernetes_version": {"required": True}, + "os_type": {"required": True}, + } + + _attribute_map = { + "kubernetes_version": {"key": "kubernetesVersion", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "os_type": {"key": "osType", "type": "str"}, + "upgrades": {"key": "upgrades", "type": "[ManagedClusterPoolUpgradeProfileUpgradesItem]"}, + } + + def __init__( + self, + *, + kubernetes_version: str, + os_type: Union[str, "_models.OSType"] = "Linux", + name: Optional[str] = None, + upgrades: Optional[List["_models.ManagedClusterPoolUpgradeProfileUpgradesItem"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword kubernetes_version: The Kubernetes version (major.minor.patch). Required. + :paramtype kubernetes_version: str + :keyword name: The Agent Pool name. + :paramtype name: str + :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" + and "Windows". + :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :keyword upgrades: List of orchestrator types and versions available for upgrade. + :paramtype upgrades: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfileUpgradesItem] + """ + super().__init__(**kwargs) + self.kubernetes_version = kubernetes_version + self.name = name + self.os_type = os_type + self.upgrades = upgrades + + +class ManagedClusterPoolUpgradeProfileUpgradesItem(_serialization.Model): # pylint: disable=name-too-long + """ManagedClusterPoolUpgradeProfileUpgradesItem. + + :ivar kubernetes_version: The Kubernetes version (major.minor.patch). + :vartype kubernetes_version: str + :ivar is_preview: Whether the Kubernetes version is currently in preview. 
+ :vartype is_preview: bool + """ + + _attribute_map = { + "kubernetes_version": {"key": "kubernetesVersion", "type": "str"}, + "is_preview": {"key": "isPreview", "type": "bool"}, + } + + def __init__( + self, *, kubernetes_version: Optional[str] = None, is_preview: Optional[bool] = None, **kwargs: Any + ) -> None: + """ + :keyword kubernetes_version: The Kubernetes version (major.minor.patch). + :paramtype kubernetes_version: str + :keyword is_preview: Whether the Kubernetes version is currently in preview. + :paramtype is_preview: bool + """ + super().__init__(**kwargs) + self.kubernetes_version = kubernetes_version + self.is_preview = is_preview + + +class ManagedClusterPropertiesAutoScalerProfile( + _serialization.Model +): # pylint: disable=too-many-instance-attributes,name-too-long + """Parameters to be applied to the cluster-autoscaler when enabled. + + :ivar balance_similar_node_groups: Valid values are 'true' and 'false'. + :vartype balance_similar_node_groups: str + :ivar daemonset_eviction_for_empty_nodes: If set to true, all daemonset pods on empty nodes + will be evicted before deletion of the node. If the daemonset pod cannot be evicted another + node will be chosen for scaling. If set to false, the node will be deleted without ensuring + that daemonset pods are deleted or evicted. + :vartype daemonset_eviction_for_empty_nodes: bool + :ivar daemonset_eviction_for_occupied_nodes: If set to true, all daemonset pods on occupied + nodes will be evicted before deletion of the node. If the daemonset pod cannot be evicted + another node will be chosen for scaling. If set to false, the node will be deleted without + ensuring that daemonset pods are deleted or evicted. + :vartype daemonset_eviction_for_occupied_nodes: bool + :ivar ignore_daemonsets_utilization: If set to true, the resources used by daemonset will be + taken into account when making scaling down decisions. 
+ :vartype ignore_daemonsets_utilization: bool + :ivar expander: If not specified, the default is 'random'. See `expanders + `_ + for more information. Known values are: "least-waste", "most-pods", "priority", and "random". + :vartype expander: str or ~azure.mgmt.containerservice.v2024_07_01.models.Expander + :ivar max_empty_bulk_delete: The default is 10. + :vartype max_empty_bulk_delete: str + :ivar max_graceful_termination_sec: The default is 600. + :vartype max_graceful_termination_sec: str + :ivar max_node_provision_time: The default is '15m'. Values must be an integer followed by an + 'm'. No unit of time other than minutes (m) is supported. + :vartype max_node_provision_time: str + :ivar max_total_unready_percentage: The default is 45. The maximum is 100 and the minimum is 0. + :vartype max_total_unready_percentage: str + :ivar new_pod_scale_up_delay: For scenarios like burst/batch scale where you don't want CA to + act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore + unscheduled pods before they're a certain age. The default is '0s'. Values must be an integer + followed by a unit ('s' for seconds, 'm' for minutes, 'h' for hours, etc). + :vartype new_pod_scale_up_delay: str + :ivar ok_total_unready_count: This must be an integer. The default is 3. + :vartype ok_total_unready_count: str + :ivar scan_interval: The default is '10'. Values must be an integer number of seconds. + :vartype scan_interval: str + :ivar scale_down_delay_after_add: The default is '10m'. Values must be an integer followed by + an 'm'. No unit of time other than minutes (m) is supported. + :vartype scale_down_delay_after_add: str + :ivar scale_down_delay_after_delete: The default is the scan-interval. Values must be an + integer followed by an 'm'. No unit of time other than minutes (m) is supported. + :vartype scale_down_delay_after_delete: str + :ivar scale_down_delay_after_failure: The default is '3m'. 
Values must be an integer followed + by an 'm'. No unit of time other than minutes (m) is supported. + :vartype scale_down_delay_after_failure: str + :ivar scale_down_unneeded_time: The default is '10m'. Values must be an integer followed by an + 'm'. No unit of time other than minutes (m) is supported. + :vartype scale_down_unneeded_time: str + :ivar scale_down_unready_time: The default is '20m'. Values must be an integer followed by an + 'm'. No unit of time other than minutes (m) is supported. + :vartype scale_down_unready_time: str + :ivar scale_down_utilization_threshold: The default is '0.5'. + :vartype scale_down_utilization_threshold: str + :ivar skip_nodes_with_local_storage: The default is true. + :vartype skip_nodes_with_local_storage: str + :ivar skip_nodes_with_system_pods: The default is true. + :vartype skip_nodes_with_system_pods: str + """ + + _attribute_map = { + "balance_similar_node_groups": {"key": "balance-similar-node-groups", "type": "str"}, + "daemonset_eviction_for_empty_nodes": {"key": "daemonset-eviction-for-empty-nodes", "type": "bool"}, + "daemonset_eviction_for_occupied_nodes": {"key": "daemonset-eviction-for-occupied-nodes", "type": "bool"}, + "ignore_daemonsets_utilization": {"key": "ignore-daemonsets-utilization", "type": "bool"}, + "expander": {"key": "expander", "type": "str"}, + "max_empty_bulk_delete": {"key": "max-empty-bulk-delete", "type": "str"}, + "max_graceful_termination_sec": {"key": "max-graceful-termination-sec", "type": "str"}, + "max_node_provision_time": {"key": "max-node-provision-time", "type": "str"}, + "max_total_unready_percentage": {"key": "max-total-unready-percentage", "type": "str"}, + "new_pod_scale_up_delay": {"key": "new-pod-scale-up-delay", "type": "str"}, + "ok_total_unready_count": {"key": "ok-total-unready-count", "type": "str"}, + "scan_interval": {"key": "scan-interval", "type": "str"}, + "scale_down_delay_after_add": {"key": "scale-down-delay-after-add", "type": "str"}, + 
"scale_down_delay_after_delete": {"key": "scale-down-delay-after-delete", "type": "str"}, + "scale_down_delay_after_failure": {"key": "scale-down-delay-after-failure", "type": "str"}, + "scale_down_unneeded_time": {"key": "scale-down-unneeded-time", "type": "str"}, + "scale_down_unready_time": {"key": "scale-down-unready-time", "type": "str"}, + "scale_down_utilization_threshold": {"key": "scale-down-utilization-threshold", "type": "str"}, + "skip_nodes_with_local_storage": {"key": "skip-nodes-with-local-storage", "type": "str"}, + "skip_nodes_with_system_pods": {"key": "skip-nodes-with-system-pods", "type": "str"}, + } + + def __init__( + self, + *, + balance_similar_node_groups: Optional[str] = None, + daemonset_eviction_for_empty_nodes: Optional[bool] = None, + daemonset_eviction_for_occupied_nodes: Optional[bool] = None, + ignore_daemonsets_utilization: Optional[bool] = None, + expander: Optional[Union[str, "_models.Expander"]] = None, + max_empty_bulk_delete: Optional[str] = None, + max_graceful_termination_sec: Optional[str] = None, + max_node_provision_time: Optional[str] = None, + max_total_unready_percentage: Optional[str] = None, + new_pod_scale_up_delay: Optional[str] = None, + ok_total_unready_count: Optional[str] = None, + scan_interval: Optional[str] = None, + scale_down_delay_after_add: Optional[str] = None, + scale_down_delay_after_delete: Optional[str] = None, + scale_down_delay_after_failure: Optional[str] = None, + scale_down_unneeded_time: Optional[str] = None, + scale_down_unready_time: Optional[str] = None, + scale_down_utilization_threshold: Optional[str] = None, + skip_nodes_with_local_storage: Optional[str] = None, + skip_nodes_with_system_pods: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword balance_similar_node_groups: Valid values are 'true' and 'false'. 
+ :paramtype balance_similar_node_groups: str + :keyword daemonset_eviction_for_empty_nodes: If set to true, all daemonset pods on empty nodes + will be evicted before deletion of the node. If the daemonset pod cannot be evicted another + node will be chosen for scaling. If set to false, the node will be deleted without ensuring + that daemonset pods are deleted or evicted. + :paramtype daemonset_eviction_for_empty_nodes: bool + :keyword daemonset_eviction_for_occupied_nodes: If set to true, all daemonset pods on occupied + nodes will be evicted before deletion of the node. If the daemonset pod cannot be evicted + another node will be chosen for scaling. If set to false, the node will be deleted without + ensuring that daemonset pods are deleted or evicted. + :paramtype daemonset_eviction_for_occupied_nodes: bool + :keyword ignore_daemonsets_utilization: If set to true, the resources used by daemonset will be + taken into account when making scaling down decisions. + :paramtype ignore_daemonsets_utilization: bool + :keyword expander: If not specified, the default is 'random'. See `expanders + `_ + for more information. Known values are: "least-waste", "most-pods", "priority", and "random". + :paramtype expander: str or ~azure.mgmt.containerservice.v2024_07_01.models.Expander + :keyword max_empty_bulk_delete: The default is 10. + :paramtype max_empty_bulk_delete: str + :keyword max_graceful_termination_sec: The default is 600. + :paramtype max_graceful_termination_sec: str + :keyword max_node_provision_time: The default is '15m'. Values must be an integer followed by + an 'm'. No unit of time other than minutes (m) is supported. + :paramtype max_node_provision_time: str + :keyword max_total_unready_percentage: The default is 45. The maximum is 100 and the minimum is + 0. 
+ :paramtype max_total_unready_percentage: str + :keyword new_pod_scale_up_delay: For scenarios like burst/batch scale where you don't want CA + to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore + unscheduled pods before they're a certain age. The default is '0s'. Values must be an integer + followed by a unit ('s' for seconds, 'm' for minutes, 'h' for hours, etc). + :paramtype new_pod_scale_up_delay: str + :keyword ok_total_unready_count: This must be an integer. The default is 3. + :paramtype ok_total_unready_count: str + :keyword scan_interval: The default is '10'. Values must be an integer number of seconds. + :paramtype scan_interval: str + :keyword scale_down_delay_after_add: The default is '10m'. Values must be an integer followed + by an 'm'. No unit of time other than minutes (m) is supported. + :paramtype scale_down_delay_after_add: str + :keyword scale_down_delay_after_delete: The default is the scan-interval. Values must be an + integer followed by an 'm'. No unit of time other than minutes (m) is supported. + :paramtype scale_down_delay_after_delete: str + :keyword scale_down_delay_after_failure: The default is '3m'. Values must be an integer + followed by an 'm'. No unit of time other than minutes (m) is supported. + :paramtype scale_down_delay_after_failure: str + :keyword scale_down_unneeded_time: The default is '10m'. Values must be an integer followed by + an 'm'. No unit of time other than minutes (m) is supported. + :paramtype scale_down_unneeded_time: str + :keyword scale_down_unready_time: The default is '20m'. Values must be an integer followed by + an 'm'. No unit of time other than minutes (m) is supported. + :paramtype scale_down_unready_time: str + :keyword scale_down_utilization_threshold: The default is '0.5'. + :paramtype scale_down_utilization_threshold: str + :keyword skip_nodes_with_local_storage: The default is true. 
+ :paramtype skip_nodes_with_local_storage: str + :keyword skip_nodes_with_system_pods: The default is true. + :paramtype skip_nodes_with_system_pods: str + """ + super().__init__(**kwargs) + self.balance_similar_node_groups = balance_similar_node_groups + self.daemonset_eviction_for_empty_nodes = daemonset_eviction_for_empty_nodes + self.daemonset_eviction_for_occupied_nodes = daemonset_eviction_for_occupied_nodes + self.ignore_daemonsets_utilization = ignore_daemonsets_utilization + self.expander = expander + self.max_empty_bulk_delete = max_empty_bulk_delete + self.max_graceful_termination_sec = max_graceful_termination_sec + self.max_node_provision_time = max_node_provision_time + self.max_total_unready_percentage = max_total_unready_percentage + self.new_pod_scale_up_delay = new_pod_scale_up_delay + self.ok_total_unready_count = ok_total_unready_count + self.scan_interval = scan_interval + self.scale_down_delay_after_add = scale_down_delay_after_add + self.scale_down_delay_after_delete = scale_down_delay_after_delete + self.scale_down_delay_after_failure = scale_down_delay_after_failure + self.scale_down_unneeded_time = scale_down_unneeded_time + self.scale_down_unready_time = scale_down_unready_time + self.scale_down_utilization_threshold = scale_down_utilization_threshold + self.skip_nodes_with_local_storage = skip_nodes_with_local_storage + self.skip_nodes_with_system_pods = skip_nodes_with_system_pods + + +class ManagedClusterSecurityProfile(_serialization.Model): + """Security profile for the container service cluster. + + :ivar defender: Microsoft Defender settings for the security profile. + :vartype defender: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileDefender + :ivar azure_key_vault_kms: Azure Key Vault `key management service + `_ settings for the security + profile. 
+ :vartype azure_key_vault_kms: ~azure.mgmt.containerservice.v2024_07_01.models.AzureKeyVaultKms + :ivar workload_identity: Workload identity settings for the security profile. Workload identity + enables Kubernetes applications to access Azure cloud resources securely with Azure AD. See + https://aka.ms/aks/wi for more details. + :vartype workload_identity: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileWorkloadIdentity + :ivar image_cleaner: Image Cleaner settings for the security profile. + :vartype image_cleaner: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileImageCleaner + """ + + _attribute_map = { + "defender": {"key": "defender", "type": "ManagedClusterSecurityProfileDefender"}, + "azure_key_vault_kms": {"key": "azureKeyVaultKms", "type": "AzureKeyVaultKms"}, + "workload_identity": {"key": "workloadIdentity", "type": "ManagedClusterSecurityProfileWorkloadIdentity"}, + "image_cleaner": {"key": "imageCleaner", "type": "ManagedClusterSecurityProfileImageCleaner"}, + } + + def __init__( + self, + *, + defender: Optional["_models.ManagedClusterSecurityProfileDefender"] = None, + azure_key_vault_kms: Optional["_models.AzureKeyVaultKms"] = None, + workload_identity: Optional["_models.ManagedClusterSecurityProfileWorkloadIdentity"] = None, + image_cleaner: Optional["_models.ManagedClusterSecurityProfileImageCleaner"] = None, + **kwargs: Any + ) -> None: + """ + :keyword defender: Microsoft Defender settings for the security profile. + :paramtype defender: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileDefender + :keyword azure_key_vault_kms: Azure Key Vault `key management service + `_ settings for the security + profile. + :paramtype azure_key_vault_kms: + ~azure.mgmt.containerservice.v2024_07_01.models.AzureKeyVaultKms + :keyword workload_identity: Workload identity settings for the security profile. 
Workload + identity enables Kubernetes applications to access Azure cloud resources securely with Azure + AD. See https://aka.ms/aks/wi for more details. + :paramtype workload_identity: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileWorkloadIdentity + :keyword image_cleaner: Image Cleaner settings for the security profile. + :paramtype image_cleaner: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileImageCleaner + """ + super().__init__(**kwargs) + self.defender = defender + self.azure_key_vault_kms = azure_key_vault_kms + self.workload_identity = workload_identity + self.image_cleaner = image_cleaner + + +class ManagedClusterSecurityProfileDefender(_serialization.Model): + """Microsoft Defender settings for the security profile. + + :ivar log_analytics_workspace_resource_id: Resource ID of the Log Analytics workspace to be + associated with Microsoft Defender. When Microsoft Defender is enabled, this field is required + and must be a valid workspace resource ID. When Microsoft Defender is disabled, leave the field + empty. + :vartype log_analytics_workspace_resource_id: str + :ivar security_monitoring: Microsoft Defender threat detection for Cloud settings for the + security profile. 
+ :vartype security_monitoring: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring + """ + + _attribute_map = { + "log_analytics_workspace_resource_id": {"key": "logAnalyticsWorkspaceResourceId", "type": "str"}, + "security_monitoring": { + "key": "securityMonitoring", + "type": "ManagedClusterSecurityProfileDefenderSecurityMonitoring", + }, + } + + def __init__( + self, + *, + log_analytics_workspace_resource_id: Optional[str] = None, + security_monitoring: Optional["_models.ManagedClusterSecurityProfileDefenderSecurityMonitoring"] = None, + **kwargs: Any + ) -> None: + """ + :keyword log_analytics_workspace_resource_id: Resource ID of the Log Analytics workspace to be + associated with Microsoft Defender. When Microsoft Defender is enabled, this field is required + and must be a valid workspace resource ID. When Microsoft Defender is disabled, leave the field + empty. + :paramtype log_analytics_workspace_resource_id: str + :keyword security_monitoring: Microsoft Defender threat detection for Cloud settings for the + security profile. + :paramtype security_monitoring: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring + """ + super().__init__(**kwargs) + self.log_analytics_workspace_resource_id = log_analytics_workspace_resource_id + self.security_monitoring = security_monitoring + + +class ManagedClusterSecurityProfileDefenderSecurityMonitoring(_serialization.Model): # pylint: disable=name-too-long + """Microsoft Defender settings for the security profile threat detection. + + :ivar enabled: Whether to enable Defender threat detection. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable Defender threat detection. 
+ :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedClusterSecurityProfileImageCleaner(_serialization.Model): # pylint: disable=name-too-long + """Image Cleaner removes unused images from nodes, freeing up disk space and helping to reduce + attack surface area. Here are settings for the security profile. + + :ivar enabled: Whether to enable Image Cleaner on AKS cluster. + :vartype enabled: bool + :ivar interval_hours: Image Cleaner scanning interval in hours. + :vartype interval_hours: int + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + "interval_hours": {"key": "intervalHours", "type": "int"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, interval_hours: Optional[int] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable Image Cleaner on AKS cluster. + :paramtype enabled: bool + :keyword interval_hours: Image Cleaner scanning interval in hours. + :paramtype interval_hours: int + """ + super().__init__(**kwargs) + self.enabled = enabled + self.interval_hours = interval_hours + + +class ManagedClusterSecurityProfileWorkloadIdentity(_serialization.Model): # pylint: disable=name-too-long + """Workload identity settings for the security profile. + + :ivar enabled: Whether to enable workload identity. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable workload identity. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedClusterServicePrincipalProfile(_serialization.Model): + """Information about a service principal identity for the cluster to use for manipulating Azure + APIs. + + All required parameters must be populated in order to send to server. + + :ivar client_id: The ID for the service principal. Required. 
+ :vartype client_id: str + :ivar secret: The secret password associated with the service principal in plain text. + :vartype secret: str + """ + + _validation = { + "client_id": {"required": True}, + } + + _attribute_map = { + "client_id": {"key": "clientId", "type": "str"}, + "secret": {"key": "secret", "type": "str"}, + } + + def __init__(self, *, client_id: str, secret: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword client_id: The ID for the service principal. Required. + :paramtype client_id: str + :keyword secret: The secret password associated with the service principal in plain text. + :paramtype secret: str + """ + super().__init__(**kwargs) + self.client_id = client_id + self.secret = secret + + +class ManagedClusterSKU(_serialization.Model): + """The SKU of a Managed Cluster. + + :ivar name: The name of a managed cluster SKU. "Base" + :vartype name: str or ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKUName + :ivar tier: If not specified, the default is 'Free'. See `AKS Pricing Tier + `_ for more details. Known + values are: "Premium", "Standard", and "Free". + :vartype tier: str or ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKUTier + """ + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "tier": {"key": "tier", "type": "str"}, + } + + def __init__( + self, + *, + name: Optional[Union[str, "_models.ManagedClusterSKUName"]] = None, + tier: Optional[Union[str, "_models.ManagedClusterSKUTier"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: The name of a managed cluster SKU. "Base" + :paramtype name: str or ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKUName + :keyword tier: If not specified, the default is 'Free'. See `AKS Pricing Tier + `_ for more details. Known + values are: "Premium", "Standard", and "Free". 
+ :paramtype tier: str or ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKUTier + """ + super().__init__(**kwargs) + self.name = name + self.tier = tier + + +class ManagedClusterStorageProfile(_serialization.Model): + """Storage profile for the container service cluster. + + :ivar disk_csi_driver: AzureDisk CSI Driver settings for the storage profile. + :vartype disk_csi_driver: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileDiskCSIDriver + :ivar file_csi_driver: AzureFile CSI Driver settings for the storage profile. + :vartype file_csi_driver: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileFileCSIDriver + :ivar snapshot_controller: Snapshot Controller settings for the storage profile. + :vartype snapshot_controller: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileSnapshotController + :ivar blob_csi_driver: AzureBlob CSI Driver settings for the storage profile. + :vartype blob_csi_driver: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileBlobCSIDriver + """ + + _attribute_map = { + "disk_csi_driver": {"key": "diskCSIDriver", "type": "ManagedClusterStorageProfileDiskCSIDriver"}, + "file_csi_driver": {"key": "fileCSIDriver", "type": "ManagedClusterStorageProfileFileCSIDriver"}, + "snapshot_controller": {"key": "snapshotController", "type": "ManagedClusterStorageProfileSnapshotController"}, + "blob_csi_driver": {"key": "blobCSIDriver", "type": "ManagedClusterStorageProfileBlobCSIDriver"}, + } + + def __init__( + self, + *, + disk_csi_driver: Optional["_models.ManagedClusterStorageProfileDiskCSIDriver"] = None, + file_csi_driver: Optional["_models.ManagedClusterStorageProfileFileCSIDriver"] = None, + snapshot_controller: Optional["_models.ManagedClusterStorageProfileSnapshotController"] = None, + blob_csi_driver: Optional["_models.ManagedClusterStorageProfileBlobCSIDriver"] = None, + **kwargs: Any + ) -> None: + """ + :keyword 
disk_csi_driver: AzureDisk CSI Driver settings for the storage profile. + :paramtype disk_csi_driver: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileDiskCSIDriver + :keyword file_csi_driver: AzureFile CSI Driver settings for the storage profile. + :paramtype file_csi_driver: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileFileCSIDriver + :keyword snapshot_controller: Snapshot Controller settings for the storage profile. + :paramtype snapshot_controller: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileSnapshotController + :keyword blob_csi_driver: AzureBlob CSI Driver settings for the storage profile. + :paramtype blob_csi_driver: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileBlobCSIDriver + """ + super().__init__(**kwargs) + self.disk_csi_driver = disk_csi_driver + self.file_csi_driver = file_csi_driver + self.snapshot_controller = snapshot_controller + self.blob_csi_driver = blob_csi_driver + + +class ManagedClusterStorageProfileBlobCSIDriver(_serialization.Model): # pylint: disable=name-too-long + """AzureBlob CSI Driver settings for the storage profile. + + :ivar enabled: Whether to enable AzureBlob CSI Driver. The default value is false. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable AzureBlob CSI Driver. The default value is false. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedClusterStorageProfileDiskCSIDriver(_serialization.Model): # pylint: disable=name-too-long + """AzureDisk CSI Driver settings for the storage profile. + + :ivar enabled: Whether to enable AzureDisk CSI Driver. The default value is true. 
+ :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable AzureDisk CSI Driver. The default value is true. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedClusterStorageProfileFileCSIDriver(_serialization.Model): # pylint: disable=name-too-long + """AzureFile CSI Driver settings for the storage profile. + + :ivar enabled: Whether to enable AzureFile CSI Driver. The default value is true. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable AzureFile CSI Driver. The default value is true. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedClusterStorageProfileSnapshotController(_serialization.Model): # pylint: disable=name-too-long + """Snapshot Controller settings for the storage profile. + + :ivar enabled: Whether to enable Snapshot Controller. The default value is true. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable Snapshot Controller. The default value is true. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedClusterUpgradeProfile(_serialization.Model): + """The list of available upgrades for compute pools. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the upgrade profile. 
+ :vartype id: str + :ivar name: The name of the upgrade profile. + :vartype name: str + :ivar type: The type of the upgrade profile. + :vartype type: str + :ivar control_plane_profile: The list of available upgrade versions for the control plane. + Required. + :vartype control_plane_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfile + :ivar agent_pool_profiles: The list of available upgrade versions for agent pools. Required. + :vartype agent_pool_profiles: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfile] + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "control_plane_profile": {"required": True}, + "agent_pool_profiles": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "control_plane_profile": {"key": "properties.controlPlaneProfile", "type": "ManagedClusterPoolUpgradeProfile"}, + "agent_pool_profiles": {"key": "properties.agentPoolProfiles", "type": "[ManagedClusterPoolUpgradeProfile]"}, + } + + def __init__( + self, + *, + control_plane_profile: "_models.ManagedClusterPoolUpgradeProfile", + agent_pool_profiles: List["_models.ManagedClusterPoolUpgradeProfile"], + **kwargs: Any + ) -> None: + """ + :keyword control_plane_profile: The list of available upgrade versions for the control plane. + Required. + :paramtype control_plane_profile: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfile + :keyword agent_pool_profiles: The list of available upgrade versions for agent pools. Required. 
+ :paramtype agent_pool_profiles: + list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfile] + """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.control_plane_profile = control_plane_profile + self.agent_pool_profiles = agent_pool_profiles + + +class ManagedClusterWindowsProfile(_serialization.Model): + """Profile for Windows VMs in the managed cluster. + + All required parameters must be populated in order to send to server. + + :ivar admin_username: Specifies the name of the administrator account. :code:`
`:code:`
` + **Restriction:** Cannot end in "." :code:`
`:code:`
` **Disallowed values:** + "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", + "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", + "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", + "test3", "user4", "user5". :code:`
`:code:`
` **Minimum-length:** 1 character + :code:`
`:code:`
` **Max-length:** 20 characters. Required. + :vartype admin_username: str + :ivar admin_password: Specifies the password of the administrator account. + :code:`
`:code:`
` **Minimum-length:** 8 characters :code:`
`:code:`
` + **Max-length:** 123 characters :code:`
`:code:`
` **Complexity requirements:** 3 out of 4 + conditions below need to be fulfilled :code:`
` Has lower characters :code:`
`Has upper + characters :code:`
` Has a digit :code:`
` Has a special character (Regex match [\\W_]) + :code:`
`:code:`
` **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", + "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!". + :vartype admin_password: str + :ivar license_type: The license type to use for Windows VMs. See `Azure Hybrid User Benefits + `_ for more details. Known values are: + "None" and "Windows_Server". + :vartype license_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.LicenseType + :ivar enable_csi_proxy: For more details on CSI proxy, see the `CSI proxy GitHub repo + `_. + :vartype enable_csi_proxy: bool + :ivar gmsa_profile: The Windows gMSA Profile in the Managed Cluster. + :vartype gmsa_profile: ~azure.mgmt.containerservice.v2024_07_01.models.WindowsGmsaProfile + """ + + _validation = { + "admin_username": {"required": True}, + } + + _attribute_map = { + "admin_username": {"key": "adminUsername", "type": "str"}, + "admin_password": {"key": "adminPassword", "type": "str"}, + "license_type": {"key": "licenseType", "type": "str"}, + "enable_csi_proxy": {"key": "enableCSIProxy", "type": "bool"}, + "gmsa_profile": {"key": "gmsaProfile", "type": "WindowsGmsaProfile"}, + } + + def __init__( + self, + *, + admin_username: str, + admin_password: Optional[str] = None, + license_type: Optional[Union[str, "_models.LicenseType"]] = None, + enable_csi_proxy: Optional[bool] = None, + gmsa_profile: Optional["_models.WindowsGmsaProfile"] = None, + **kwargs: Any + ) -> None: + """ + :keyword admin_username: Specifies the name of the administrator account. + :code:`
`:code:`
` **Restriction:** Cannot end in "." :code:`
`:code:`
` + **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", + "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", + "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", + "sys", "test2", "test3", "user4", "user5". :code:`
`:code:`
` **Minimum-length:** 1 + character :code:`
`:code:`
` **Max-length:** 20 characters. Required. + :paramtype admin_username: str + :keyword admin_password: Specifies the password of the administrator account. + :code:`
`:code:`
` **Minimum-length:** 8 characters :code:`
`:code:`
` + **Max-length:** 123 characters :code:`
`:code:`
` **Complexity requirements:** 3 out of 4 + conditions below need to be fulfilled :code:`
` Has lower characters :code:`
`Has upper + characters :code:`
` Has a digit :code:`
` Has a special character (Regex match [\\W_]) + :code:`
`:code:`
` **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", + "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!". + :paramtype admin_password: str + :keyword license_type: The license type to use for Windows VMs. See `Azure Hybrid User Benefits + `_ for more details. Known values are: + "None" and "Windows_Server". + :paramtype license_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.LicenseType + :keyword enable_csi_proxy: For more details on CSI proxy, see the `CSI proxy GitHub repo + `_. + :paramtype enable_csi_proxy: bool + :keyword gmsa_profile: The Windows gMSA Profile in the Managed Cluster. + :paramtype gmsa_profile: ~azure.mgmt.containerservice.v2024_07_01.models.WindowsGmsaProfile + """ + super().__init__(**kwargs) + self.admin_username = admin_username + self.admin_password = admin_password + self.license_type = license_type + self.enable_csi_proxy = enable_csi_proxy + self.gmsa_profile = gmsa_profile + + +class ManagedClusterWorkloadAutoScalerProfile(_serialization.Model): + """Workload Auto-scaler profile for the managed cluster. + + :ivar keda: KEDA (Kubernetes Event-driven Autoscaling) settings for the workload auto-scaler + profile. + :vartype keda: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfileKeda + :ivar vertical_pod_autoscaler: VPA (Vertical Pod Autoscaler) settings for the workload + auto-scaler profile. 
+ :vartype vertical_pod_autoscaler: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler + """ + + _attribute_map = { + "keda": {"key": "keda", "type": "ManagedClusterWorkloadAutoScalerProfileKeda"}, + "vertical_pod_autoscaler": { + "key": "verticalPodAutoscaler", + "type": "ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler", + }, + } + + def __init__( + self, + *, + keda: Optional["_models.ManagedClusterWorkloadAutoScalerProfileKeda"] = None, + vertical_pod_autoscaler: Optional[ + "_models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler" + ] = None, + **kwargs: Any + ) -> None: + """ + :keyword keda: KEDA (Kubernetes Event-driven Autoscaling) settings for the workload auto-scaler + profile. + :paramtype keda: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfileKeda + :keyword vertical_pod_autoscaler: VPA (Vertical Pod Autoscaler) settings for the workload + auto-scaler profile. + :paramtype vertical_pod_autoscaler: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler + """ + super().__init__(**kwargs) + self.keda = keda + self.vertical_pod_autoscaler = vertical_pod_autoscaler + + +class ManagedClusterWorkloadAutoScalerProfileKeda(_serialization.Model): # pylint: disable=name-too-long + """KEDA (Kubernetes Event-driven Autoscaling) settings for the workload auto-scaler profile. + + All required parameters must be populated in order to send to server. + + :ivar enabled: Whether to enable KEDA. Required. + :vartype enabled: bool + """ + + _validation = { + "enabled": {"required": True}, + } + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: bool, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable KEDA. Required. 
+ :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler( + _serialization.Model +): # pylint: disable=name-too-long + """VPA (Vertical Pod Autoscaler) settings for the workload auto-scaler profile. + + All required parameters must be populated in order to send to server. + + :ivar enabled: Whether to enable VPA. Default value is false. Required. + :vartype enabled: bool + """ + + _validation = { + "enabled": {"required": True}, + } + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: bool = False, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable VPA. Default value is false. Required. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedServiceIdentityUserAssignedIdentitiesValue(_serialization.Model): # pylint: disable=name-too-long + """ManagedServiceIdentityUserAssignedIdentitiesValue. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar principal_id: The principal id of user assigned identity. + :vartype principal_id: str + :ivar client_id: The client id of user assigned identity. + :vartype client_id: str + """ + + _validation = { + "principal_id": {"readonly": True}, + "client_id": {"readonly": True}, + } + + _attribute_map = { + "principal_id": {"key": "principalId", "type": "str"}, + "client_id": {"key": "clientId", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.principal_id = None + self.client_id = None + + +class MeshRevision(_serialization.Model): + """Holds information on upgrades and compatibility for given major.minor mesh release. + + :ivar revision: The revision of the mesh release. + :vartype revision: str + :ivar upgrades: List of revisions available for upgrade of a specific mesh revision. 
+ :vartype upgrades: list[str] + :ivar compatible_with: List of items this revision of service mesh is compatible with, and + their associated versions. + :vartype compatible_with: + list[~azure.mgmt.containerservice.v2024_07_01.models.CompatibleVersions] + """ + + _attribute_map = { + "revision": {"key": "revision", "type": "str"}, + "upgrades": {"key": "upgrades", "type": "[str]"}, + "compatible_with": {"key": "compatibleWith", "type": "[CompatibleVersions]"}, + } + + def __init__( + self, + *, + revision: Optional[str] = None, + upgrades: Optional[List[str]] = None, + compatible_with: Optional[List["_models.CompatibleVersions"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword revision: The revision of the mesh release. + :paramtype revision: str + :keyword upgrades: List of revisions available for upgrade of a specific mesh revision. + :paramtype upgrades: list[str] + :keyword compatible_with: List of items this revision of service mesh is compatible with, and + their associated versions. + :paramtype compatible_with: + list[~azure.mgmt.containerservice.v2024_07_01.models.CompatibleVersions] + """ + super().__init__(**kwargs) + self.revision = revision + self.upgrades = upgrades + self.compatible_with = compatible_with + + +class ProxyResource(Resource): + """The resource model definition for a Azure Resource Manager proxy resource. It will not have + tags and a location. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. E.g. + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". 
+ :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + """ + + +class MeshRevisionProfile(ProxyResource): + """Mesh revision profile for a mesh. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. E.g. + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :ivar properties: Mesh revision profile properties for a mesh. + :vartype properties: + ~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfileProperties + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "properties": {"key": "properties", "type": "MeshRevisionProfileProperties"}, + } + + def __init__(self, *, properties: Optional["_models.MeshRevisionProfileProperties"] = None, **kwargs: Any) -> None: + """ + :keyword properties: Mesh revision profile properties for a mesh. 
+ :paramtype properties: + ~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfileProperties + """ + super().__init__(**kwargs) + self.properties = properties + + +class MeshRevisionProfileList(_serialization.Model): + """Holds an array of MeshRevisionsProfiles. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: Array of service mesh add-on revision profiles for all supported mesh modes. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile] + :ivar next_link: The URL to get the next set of mesh revision profile. + :vartype next_link: str + """ + + _validation = { + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[MeshRevisionProfile]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: Optional[List["_models.MeshRevisionProfile"]] = None, **kwargs: Any) -> None: + """ + :keyword value: Array of service mesh add-on revision profiles for all supported mesh modes. + :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile] + """ + super().__init__(**kwargs) + self.value = value + self.next_link = None + + +class MeshRevisionProfileProperties(_serialization.Model): + """Mesh revision profile properties for a mesh. + + :ivar mesh_revisions: + :vartype mesh_revisions: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevision] + """ + + _attribute_map = { + "mesh_revisions": {"key": "meshRevisions", "type": "[MeshRevision]"}, + } + + def __init__(self, *, mesh_revisions: Optional[List["_models.MeshRevision"]] = None, **kwargs: Any) -> None: + """ + :keyword mesh_revisions: + :paramtype mesh_revisions: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevision] + """ + super().__init__(**kwargs) + self.mesh_revisions = mesh_revisions + + +class MeshUpgradeProfile(ProxyResource): + """Upgrade profile for given mesh. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. E.g. + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :ivar properties: Mesh upgrade profile properties for a major.minor release. + :vartype properties: + ~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfileProperties + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "properties": {"key": "properties", "type": "MeshUpgradeProfileProperties"}, + } + + def __init__(self, *, properties: Optional["_models.MeshUpgradeProfileProperties"] = None, **kwargs: Any) -> None: + """ + :keyword properties: Mesh upgrade profile properties for a major.minor release. + :paramtype properties: + ~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfileProperties + """ + super().__init__(**kwargs) + self.properties = properties + + +class MeshUpgradeProfileList(_serialization.Model): + """Holds an array of MeshUpgradeProfiles. + + Variables are only populated by the server, and will be ignored when sending a request. 
+ + :ivar value: Array of supported service mesh add-on upgrade profiles. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile] + :ivar next_link: The URL to get the next set of mesh upgrade profile. + :vartype next_link: str + """ + + _validation = { + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[MeshUpgradeProfile]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: Optional[List["_models.MeshUpgradeProfile"]] = None, **kwargs: Any) -> None: + """ + :keyword value: Array of supported service mesh add-on upgrade profiles. + :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile] + """ + super().__init__(**kwargs) + self.value = value + self.next_link = None + + +class MeshUpgradeProfileProperties(MeshRevision): + """Mesh upgrade profile properties for a major.minor release. + + :ivar revision: The revision of the mesh release. + :vartype revision: str + :ivar upgrades: List of revisions available for upgrade of a specific mesh revision. + :vartype upgrades: list[str] + :ivar compatible_with: List of items this revision of service mesh is compatible with, and + their associated versions. + :vartype compatible_with: + list[~azure.mgmt.containerservice.v2024_07_01.models.CompatibleVersions] + """ + + +class OperationListResult(_serialization.Model): + """The List Operation response. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: The list of operations. 
+ :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.OperationValue] + """ + + _validation = { + "value": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[OperationValue]"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.value = None + + +class OperationValue(_serialization.Model): + """Describes the properties of a Operation value. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar origin: The origin of the operation. + :vartype origin: str + :ivar name: The name of the operation. + :vartype name: str + :ivar operation: The display name of the operation. + :vartype operation: str + :ivar resource: The display name of the resource the operation applies to. + :vartype resource: str + :ivar description: The description of the operation. + :vartype description: str + :ivar provider: The resource provider for the operation. + :vartype provider: str + """ + + _validation = { + "origin": {"readonly": True}, + "name": {"readonly": True}, + "operation": {"readonly": True}, + "resource": {"readonly": True}, + "description": {"readonly": True}, + "provider": {"readonly": True}, + } + + _attribute_map = { + "origin": {"key": "origin", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "operation": {"key": "display.operation", "type": "str"}, + "resource": {"key": "display.resource", "type": "str"}, + "description": {"key": "display.description", "type": "str"}, + "provider": {"key": "display.provider", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.origin = None + self.name = None + self.operation = None + self.resource = None + self.description = None + self.provider = None + + +class OutboundEnvironmentEndpoint(_serialization.Model): + """Egress endpoints which AKS agent nodes connect to for common purpose. 
+ + :ivar category: The category of endpoints accessed by the AKS agent node, e.g. + azure-resource-management, apiserver, etc. + :vartype category: str + :ivar endpoints: The endpoints that AKS agent nodes connect to. + :vartype endpoints: list[~azure.mgmt.containerservice.v2024_07_01.models.EndpointDependency] + """ + + _attribute_map = { + "category": {"key": "category", "type": "str"}, + "endpoints": {"key": "endpoints", "type": "[EndpointDependency]"}, + } + + def __init__( + self, + *, + category: Optional[str] = None, + endpoints: Optional[List["_models.EndpointDependency"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword category: The category of endpoints accessed by the AKS agent node, e.g. + azure-resource-management, apiserver, etc. + :paramtype category: str + :keyword endpoints: The endpoints that AKS agent nodes connect to. + :paramtype endpoints: list[~azure.mgmt.containerservice.v2024_07_01.models.EndpointDependency] + """ + super().__init__(**kwargs) + self.category = category + self.endpoints = endpoints + + +class OutboundEnvironmentEndpointCollection(_serialization.Model): + """Collection of OutboundEnvironmentEndpoint. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar value: Collection of resources. Required. + :vartype value: + list[~azure.mgmt.containerservice.v2024_07_01.models.OutboundEnvironmentEndpoint] + :ivar next_link: Link to next page of resources. + :vartype next_link: str + """ + + _validation = { + "value": {"required": True}, + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[OutboundEnvironmentEndpoint]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: List["_models.OutboundEnvironmentEndpoint"], **kwargs: Any) -> None: + """ + :keyword value: Collection of resources. Required. 
+ :paramtype value: + list[~azure.mgmt.containerservice.v2024_07_01.models.OutboundEnvironmentEndpoint] + """ + super().__init__(**kwargs) + self.value = value + self.next_link = None + + +class PortRange(_serialization.Model): + """The port range. + + :ivar port_start: The minimum port that is included in the range. It should be ranged from 1 to + 65535, and be less than or equal to portEnd. + :vartype port_start: int + :ivar port_end: The maximum port that is included in the range. It should be ranged from 1 to + 65535, and be greater than or equal to portStart. + :vartype port_end: int + :ivar protocol: The network protocol of the port. Known values are: "TCP" and "UDP". + :vartype protocol: str or ~azure.mgmt.containerservice.v2024_07_01.models.Protocol + """ + + _validation = { + "port_start": {"maximum": 65535, "minimum": 1}, + "port_end": {"maximum": 65535, "minimum": 1}, + } + + _attribute_map = { + "port_start": {"key": "portStart", "type": "int"}, + "port_end": {"key": "portEnd", "type": "int"}, + "protocol": {"key": "protocol", "type": "str"}, + } + + def __init__( + self, + *, + port_start: Optional[int] = None, + port_end: Optional[int] = None, + protocol: Optional[Union[str, "_models.Protocol"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword port_start: The minimum port that is included in the range. It should be ranged from 1 + to 65535, and be less than or equal to portEnd. + :paramtype port_start: int + :keyword port_end: The maximum port that is included in the range. It should be ranged from 1 + to 65535, and be greater than or equal to portStart. + :paramtype port_end: int + :keyword protocol: The network protocol of the port. Known values are: "TCP" and "UDP". 
+ :paramtype protocol: str or ~azure.mgmt.containerservice.v2024_07_01.models.Protocol + """ + super().__init__(**kwargs) + self.port_start = port_start + self.port_end = port_end + self.protocol = protocol + + +class PowerState(_serialization.Model): + """Describes the Power State of the cluster. + + :ivar code: Tells whether the cluster is Running or Stopped. Known values are: "Running" and + "Stopped". + :vartype code: str or ~azure.mgmt.containerservice.v2024_07_01.models.Code + """ + + _attribute_map = { + "code": {"key": "code", "type": "str"}, + } + + def __init__(self, *, code: Optional[Union[str, "_models.Code"]] = None, **kwargs: Any) -> None: + """ + :keyword code: Tells whether the cluster is Running or Stopped. Known values are: "Running" and + "Stopped". + :paramtype code: str or ~azure.mgmt.containerservice.v2024_07_01.models.Code + """ + super().__init__(**kwargs) + self.code = code + + +class PrivateEndpoint(_serialization.Model): + """Private endpoint which a connection belongs to. + + :ivar id: The resource ID of the private endpoint. + :vartype id: str + """ + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + } + + def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None: # pylint: disable=redefined-builtin + """ + :keyword id: The resource ID of the private endpoint. + :paramtype id: str + """ + super().__init__(**kwargs) + self.id = id + + +class PrivateEndpointConnection(_serialization.Model): + """A private endpoint connection. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: The ID of the private endpoint connection. + :vartype id: str + :ivar name: The name of the private endpoint connection. + :vartype name: str + :ivar type: The resource type. + :vartype type: str + :ivar provisioning_state: The current provisioning state. Known values are: "Canceled", + "Creating", "Deleting", "Failed", and "Succeeded". 
+ :vartype provisioning_state: str or + ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnectionProvisioningState + :ivar private_endpoint: The resource of private endpoint. + :vartype private_endpoint: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpoint + :ivar private_link_service_connection_state: A collection of information about the state of the + connection between service consumer and provider. + :vartype private_link_service_connection_state: + ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkServiceConnectionState + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "provisioning_state": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"}, + "private_link_service_connection_state": { + "key": "properties.privateLinkServiceConnectionState", + "type": "PrivateLinkServiceConnectionState", + }, + } + + def __init__( + self, + *, + private_endpoint: Optional["_models.PrivateEndpoint"] = None, + private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None, + **kwargs: Any + ) -> None: + """ + :keyword private_endpoint: The resource of private endpoint. + :paramtype private_endpoint: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpoint + :keyword private_link_service_connection_state: A collection of information about the state of + the connection between service consumer and provider. 
+ :paramtype private_link_service_connection_state: + ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkServiceConnectionState + """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.provisioning_state = None + self.private_endpoint = private_endpoint + self.private_link_service_connection_state = private_link_service_connection_state + + +class PrivateEndpointConnectionListResult(_serialization.Model): + """A list of private endpoint connections. + + :ivar value: The collection value. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection] + """ + + _attribute_map = { + "value": {"key": "value", "type": "[PrivateEndpointConnection]"}, + } + + def __init__(self, *, value: Optional[List["_models.PrivateEndpointConnection"]] = None, **kwargs: Any) -> None: + """ + :keyword value: The collection value. + :paramtype value: + list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection] + """ + super().__init__(**kwargs) + self.value = value + + +class PrivateLinkResource(_serialization.Model): + """A private link resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: The ID of the private link resource. + :vartype id: str + :ivar name: The name of the private link resource. + :vartype name: str + :ivar type: The resource type. + :vartype type: str + :ivar group_id: The group ID of the resource. + :vartype group_id: str + :ivar required_members: The RequiredMembers of the resource. + :vartype required_members: list[str] + :ivar private_link_service_id: The private link service ID of the resource, this field is + exposed only to NRP internally. 
+ :vartype private_link_service_id: str + """ + + _validation = { + "private_link_service_id": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "group_id": {"key": "groupId", "type": "str"}, + "required_members": {"key": "requiredMembers", "type": "[str]"}, + "private_link_service_id": {"key": "privateLinkServiceID", "type": "str"}, + } + + def __init__( + self, + *, + id: Optional[str] = None, # pylint: disable=redefined-builtin + name: Optional[str] = None, + type: Optional[str] = None, + group_id: Optional[str] = None, + required_members: Optional[List[str]] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: The ID of the private link resource. + :paramtype id: str + :keyword name: The name of the private link resource. + :paramtype name: str + :keyword type: The resource type. + :paramtype type: str + :keyword group_id: The group ID of the resource. + :paramtype group_id: str + :keyword required_members: The RequiredMembers of the resource. + :paramtype required_members: list[str] + """ + super().__init__(**kwargs) + self.id = id + self.name = name + self.type = type + self.group_id = group_id + self.required_members = required_members + self.private_link_service_id = None + + +class PrivateLinkResourcesListResult(_serialization.Model): + """A list of private link resources. + + :ivar value: The collection value. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource] + """ + + _attribute_map = { + "value": {"key": "value", "type": "[PrivateLinkResource]"}, + } + + def __init__(self, *, value: Optional[List["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None: + """ + :keyword value: The collection value. 
+ :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource] + """ + super().__init__(**kwargs) + self.value = value + + +class PrivateLinkServiceConnectionState(_serialization.Model): + """The state of a private link service connection. + + :ivar status: The private link service connection status. Known values are: "Pending", + "Approved", "Rejected", and "Disconnected". + :vartype status: str or ~azure.mgmt.containerservice.v2024_07_01.models.ConnectionStatus + :ivar description: The private link service connection description. + :vartype description: str + """ + + _attribute_map = { + "status": {"key": "status", "type": "str"}, + "description": {"key": "description", "type": "str"}, + } + + def __init__( + self, + *, + status: Optional[Union[str, "_models.ConnectionStatus"]] = None, + description: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword status: The private link service connection status. Known values are: "Pending", + "Approved", "Rejected", and "Disconnected". + :paramtype status: str or ~azure.mgmt.containerservice.v2024_07_01.models.ConnectionStatus + :keyword description: The private link service connection description. + :paramtype description: str + """ + super().__init__(**kwargs) + self.status = status + self.description = description + + +class RelativeMonthlySchedule(_serialization.Model): + """For schedules like: 'recur every month on the first Monday' or 'recur every 3 months on last + Friday'. + + All required parameters must be populated in order to send to server. + + :ivar interval_months: Specifies the number of months between each set of occurrences. + Required. + :vartype interval_months: int + :ivar week_index: Specifies on which week of the month the dayOfWeek applies. Required. Known + values are: "First", "Second", "Third", "Fourth", and "Last". 
+ :vartype week_index: str or ~azure.mgmt.containerservice.v2024_07_01.models.Type + :ivar day_of_week: Specifies on which day of the week the maintenance occurs. Required. Known + values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". + :vartype day_of_week: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + """ + + _validation = { + "interval_months": {"required": True, "maximum": 6, "minimum": 1}, + "week_index": {"required": True}, + "day_of_week": {"required": True}, + } + + _attribute_map = { + "interval_months": {"key": "intervalMonths", "type": "int"}, + "week_index": {"key": "weekIndex", "type": "str"}, + "day_of_week": {"key": "dayOfWeek", "type": "str"}, + } + + def __init__( + self, + *, + interval_months: int, + week_index: Union[str, "_models.Type"], + day_of_week: Union[str, "_models.WeekDay"], + **kwargs: Any + ) -> None: + """ + :keyword interval_months: Specifies the number of months between each set of occurrences. + Required. + :paramtype interval_months: int + :keyword week_index: Specifies on which week of the month the dayOfWeek applies. Required. + Known values are: "First", "Second", "Third", "Fourth", and "Last". + :paramtype week_index: str or ~azure.mgmt.containerservice.v2024_07_01.models.Type + :keyword day_of_week: Specifies on which day of the week the maintenance occurs. Required. + Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and + "Saturday". + :paramtype day_of_week: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + """ + super().__init__(**kwargs) + self.interval_months = interval_months + self.week_index = week_index + self.day_of_week = day_of_week + + +class ResourceReference(_serialization.Model): + """A reference to an Azure resource. + + :ivar id: The fully qualified Azure resource id. 
+ :vartype id: str + """ + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + } + + def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None: # pylint: disable=redefined-builtin + """ + :keyword id: The fully qualified Azure resource id. + :paramtype id: str + """ + super().__init__(**kwargs) + self.id = id + + +class RunCommandRequest(_serialization.Model): + """A run command request. + + All required parameters must be populated in order to send to server. + + :ivar command: The command to run. Required. + :vartype command: str + :ivar context: A base64 encoded zip file containing the files required by the command. + :vartype context: str + :ivar cluster_token: AuthToken issued for AKS AAD Server App. + :vartype cluster_token: str + """ + + _validation = { + "command": {"required": True}, + } + + _attribute_map = { + "command": {"key": "command", "type": "str"}, + "context": {"key": "context", "type": "str"}, + "cluster_token": {"key": "clusterToken", "type": "str"}, + } + + def __init__( + self, *, command: str, context: Optional[str] = None, cluster_token: Optional[str] = None, **kwargs: Any + ) -> None: + """ + :keyword command: The command to run. Required. + :paramtype command: str + :keyword context: A base64 encoded zip file containing the files required by the command. + :paramtype context: str + :keyword cluster_token: AuthToken issued for AKS AAD Server App. + :paramtype cluster_token: str + """ + super().__init__(**kwargs) + self.command = command + self.context = context + self.cluster_token = cluster_token + + +class RunCommandResult(_serialization.Model): + """run command result. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: The command id. + :vartype id: str + :ivar provisioning_state: provisioning State. + :vartype provisioning_state: str + :ivar exit_code: The exit code of the command. 
+ :vartype exit_code: int + :ivar started_at: The time when the command started. + :vartype started_at: ~datetime.datetime + :ivar finished_at: The time when the command finished. + :vartype finished_at: ~datetime.datetime + :ivar logs: The command output. + :vartype logs: str + :ivar reason: An explanation of why provisioningState is set to failed (if so). + :vartype reason: str + """ + + _validation = { + "id": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "exit_code": {"readonly": True}, + "started_at": {"readonly": True}, + "finished_at": {"readonly": True}, + "logs": {"readonly": True}, + "reason": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "exit_code": {"key": "properties.exitCode", "type": "int"}, + "started_at": {"key": "properties.startedAt", "type": "iso-8601"}, + "finished_at": {"key": "properties.finishedAt", "type": "iso-8601"}, + "logs": {"key": "properties.logs", "type": "str"}, + "reason": {"key": "properties.reason", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.id = None + self.provisioning_state = None + self.exit_code = None + self.started_at = None + self.finished_at = None + self.logs = None + self.reason = None + + +class Schedule(_serialization.Model): + """One and only one of the schedule types should be specified. Choose either 'daily', 'weekly', + 'absoluteMonthly' or 'relativeMonthly' for your maintenance schedule. + + :ivar daily: For schedules like: 'recur every day' or 'recur every 3 days'. + :vartype daily: ~azure.mgmt.containerservice.v2024_07_01.models.DailySchedule + :ivar weekly: For schedules like: 'recur every Monday' or 'recur every 3 weeks on Wednesday'. 
+ :vartype weekly: ~azure.mgmt.containerservice.v2024_07_01.models.WeeklySchedule + :ivar absolute_monthly: For schedules like: 'recur every month on the 15th' or 'recur every 3 + months on the 20th'. + :vartype absolute_monthly: + ~azure.mgmt.containerservice.v2024_07_01.models.AbsoluteMonthlySchedule + :ivar relative_monthly: For schedules like: 'recur every month on the first Monday' or 'recur + every 3 months on last Friday'. + :vartype relative_monthly: + ~azure.mgmt.containerservice.v2024_07_01.models.RelativeMonthlySchedule + """ + + _attribute_map = { + "daily": {"key": "daily", "type": "DailySchedule"}, + "weekly": {"key": "weekly", "type": "WeeklySchedule"}, + "absolute_monthly": {"key": "absoluteMonthly", "type": "AbsoluteMonthlySchedule"}, + "relative_monthly": {"key": "relativeMonthly", "type": "RelativeMonthlySchedule"}, + } + + def __init__( + self, + *, + daily: Optional["_models.DailySchedule"] = None, + weekly: Optional["_models.WeeklySchedule"] = None, + absolute_monthly: Optional["_models.AbsoluteMonthlySchedule"] = None, + relative_monthly: Optional["_models.RelativeMonthlySchedule"] = None, + **kwargs: Any + ) -> None: + """ + :keyword daily: For schedules like: 'recur every day' or 'recur every 3 days'. + :paramtype daily: ~azure.mgmt.containerservice.v2024_07_01.models.DailySchedule + :keyword weekly: For schedules like: 'recur every Monday' or 'recur every 3 weeks on + Wednesday'. + :paramtype weekly: ~azure.mgmt.containerservice.v2024_07_01.models.WeeklySchedule + :keyword absolute_monthly: For schedules like: 'recur every month on the 15th' or 'recur every + 3 months on the 20th'. + :paramtype absolute_monthly: + ~azure.mgmt.containerservice.v2024_07_01.models.AbsoluteMonthlySchedule + :keyword relative_monthly: For schedules like: 'recur every month on the first Monday' or + 'recur every 3 months on last Friday'. 
+ :paramtype relative_monthly: + ~azure.mgmt.containerservice.v2024_07_01.models.RelativeMonthlySchedule + """ + super().__init__(**kwargs) + self.daily = daily + self.weekly = weekly + self.absolute_monthly = absolute_monthly + self.relative_monthly = relative_monthly + + +class ServiceMeshProfile(_serialization.Model): + """Service mesh profile for a managed cluster. + + All required parameters must be populated in order to send to server. + + :ivar mode: Mode of the service mesh. Required. Known values are: "Istio" and "Disabled". + :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ServiceMeshMode + :ivar istio: Istio service mesh configuration. + :vartype istio: ~azure.mgmt.containerservice.v2024_07_01.models.IstioServiceMesh + """ + + _validation = { + "mode": {"required": True}, + } + + _attribute_map = { + "mode": {"key": "mode", "type": "str"}, + "istio": {"key": "istio", "type": "IstioServiceMesh"}, + } + + def __init__( + self, + *, + mode: Union[str, "_models.ServiceMeshMode"], + istio: Optional["_models.IstioServiceMesh"] = None, + **kwargs: Any + ) -> None: + """ + :keyword mode: Mode of the service mesh. Required. Known values are: "Istio" and "Disabled". + :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ServiceMeshMode + :keyword istio: Istio service mesh configuration. + :paramtype istio: ~azure.mgmt.containerservice.v2024_07_01.models.IstioServiceMesh + """ + super().__init__(**kwargs) + self.mode = mode + self.istio = istio + + +class Snapshot(TrackedResource): # pylint: disable=too-many-instance-attributes + """A node pool snapshot resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: Fully qualified resource ID for the resource. E.g. 
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar location: The geo-location where the resource lives. Required. + :vartype location: str + :ivar creation_data: CreationData to be used to specify the source agent pool resource ID to + create this snapshot. + :vartype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :ivar snapshot_type: The type of a snapshot. The default is NodePool. "NodePool" + :vartype snapshot_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.SnapshotType + :ivar kubernetes_version: The version of Kubernetes. + :vartype kubernetes_version: str + :ivar node_image_version: The version of node image. + :vartype node_image_version: str + :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and + "Windows". + :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is + Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", + "Windows2019", and "Windows2022". + :vartype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + :ivar vm_size: The size of the VM. 
+ :vartype vm_size: str + :ivar enable_fips: Whether to use a FIPS-enabled OS. + :vartype enable_fips: bool + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + "location": {"required": True}, + "kubernetes_version": {"readonly": True}, + "node_image_version": {"readonly": True}, + "os_type": {"readonly": True}, + "os_sku": {"readonly": True}, + "vm_size": {"readonly": True}, + "enable_fips": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "tags": {"key": "tags", "type": "{str}"}, + "location": {"key": "location", "type": "str"}, + "creation_data": {"key": "properties.creationData", "type": "CreationData"}, + "snapshot_type": {"key": "properties.snapshotType", "type": "str"}, + "kubernetes_version": {"key": "properties.kubernetesVersion", "type": "str"}, + "node_image_version": {"key": "properties.nodeImageVersion", "type": "str"}, + "os_type": {"key": "properties.osType", "type": "str"}, + "os_sku": {"key": "properties.osSku", "type": "str"}, + "vm_size": {"key": "properties.vmSize", "type": "str"}, + "enable_fips": {"key": "properties.enableFIPS", "type": "bool"}, + } + + def __init__( + self, + *, + location: str, + tags: Optional[Dict[str, str]] = None, + creation_data: Optional["_models.CreationData"] = None, + snapshot_type: Union[str, "_models.SnapshotType"] = "NodePool", + **kwargs: Any + ) -> None: + """ + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword location: The geo-location where the resource lives. Required. + :paramtype location: str + :keyword creation_data: CreationData to be used to specify the source agent pool resource ID to + create this snapshot. 
+ :paramtype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :keyword snapshot_type: The type of a snapshot. The default is NodePool. "NodePool" + :paramtype snapshot_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.SnapshotType + """ + super().__init__(tags=tags, location=location, **kwargs) + self.creation_data = creation_data + self.snapshot_type = snapshot_type + self.kubernetes_version = None + self.node_image_version = None + self.os_type = None + self.os_sku = None + self.vm_size = None + self.enable_fips = None + + +class SnapshotListResult(_serialization.Model): + """The response from the List Snapshots operation. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: The list of snapshots. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :ivar next_link: The URL to get the next set of snapshot results. + :vartype next_link: str + """ + + _validation = { + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[Snapshot]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: Optional[List["_models.Snapshot"]] = None, **kwargs: Any) -> None: + """ + :keyword value: The list of snapshots. + :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + """ + super().__init__(**kwargs) + self.value = value + self.next_link = None + + +class SysctlConfig(_serialization.Model): # pylint: disable=too-many-instance-attributes + """Sysctl settings for Linux agent nodes. + + :ivar net_core_somaxconn: Sysctl setting net.core.somaxconn. + :vartype net_core_somaxconn: int + :ivar net_core_netdev_max_backlog: Sysctl setting net.core.netdev_max_backlog. + :vartype net_core_netdev_max_backlog: int + :ivar net_core_rmem_default: Sysctl setting net.core.rmem_default. 
+ :vartype net_core_rmem_default: int + :ivar net_core_rmem_max: Sysctl setting net.core.rmem_max. + :vartype net_core_rmem_max: int + :ivar net_core_wmem_default: Sysctl setting net.core.wmem_default. + :vartype net_core_wmem_default: int + :ivar net_core_wmem_max: Sysctl setting net.core.wmem_max. + :vartype net_core_wmem_max: int + :ivar net_core_optmem_max: Sysctl setting net.core.optmem_max. + :vartype net_core_optmem_max: int + :ivar net_ipv4_tcp_max_syn_backlog: Sysctl setting net.ipv4.tcp_max_syn_backlog. + :vartype net_ipv4_tcp_max_syn_backlog: int + :ivar net_ipv4_tcp_max_tw_buckets: Sysctl setting net.ipv4.tcp_max_tw_buckets. + :vartype net_ipv4_tcp_max_tw_buckets: int + :ivar net_ipv4_tcp_fin_timeout: Sysctl setting net.ipv4.tcp_fin_timeout. + :vartype net_ipv4_tcp_fin_timeout: int + :ivar net_ipv4_tcp_keepalive_time: Sysctl setting net.ipv4.tcp_keepalive_time. + :vartype net_ipv4_tcp_keepalive_time: int + :ivar net_ipv4_tcp_keepalive_probes: Sysctl setting net.ipv4.tcp_keepalive_probes. + :vartype net_ipv4_tcp_keepalive_probes: int + :ivar net_ipv4_tcpkeepalive_intvl: Sysctl setting net.ipv4.tcp_keepalive_intvl. + :vartype net_ipv4_tcpkeepalive_intvl: int + :ivar net_ipv4_tcp_tw_reuse: Sysctl setting net.ipv4.tcp_tw_reuse. + :vartype net_ipv4_tcp_tw_reuse: bool + :ivar net_ipv4_ip_local_port_range: Sysctl setting net.ipv4.ip_local_port_range. + :vartype net_ipv4_ip_local_port_range: str + :ivar net_ipv4_neigh_default_gc_thresh1: Sysctl setting net.ipv4.neigh.default.gc_thresh1. + :vartype net_ipv4_neigh_default_gc_thresh1: int + :ivar net_ipv4_neigh_default_gc_thresh2: Sysctl setting net.ipv4.neigh.default.gc_thresh2. + :vartype net_ipv4_neigh_default_gc_thresh2: int + :ivar net_ipv4_neigh_default_gc_thresh3: Sysctl setting net.ipv4.neigh.default.gc_thresh3. + :vartype net_ipv4_neigh_default_gc_thresh3: int + :ivar net_netfilter_nf_conntrack_max: Sysctl setting net.netfilter.nf_conntrack_max. 
+ :vartype net_netfilter_nf_conntrack_max: int + :ivar net_netfilter_nf_conntrack_buckets: Sysctl setting net.netfilter.nf_conntrack_buckets. + :vartype net_netfilter_nf_conntrack_buckets: int + :ivar fs_inotify_max_user_watches: Sysctl setting fs.inotify.max_user_watches. + :vartype fs_inotify_max_user_watches: int + :ivar fs_file_max: Sysctl setting fs.file-max. + :vartype fs_file_max: int + :ivar fs_aio_max_nr: Sysctl setting fs.aio-max-nr. + :vartype fs_aio_max_nr: int + :ivar fs_nr_open: Sysctl setting fs.nr_open. + :vartype fs_nr_open: int + :ivar kernel_threads_max: Sysctl setting kernel.threads-max. + :vartype kernel_threads_max: int + :ivar vm_max_map_count: Sysctl setting vm.max_map_count. + :vartype vm_max_map_count: int + :ivar vm_swappiness: Sysctl setting vm.swappiness. + :vartype vm_swappiness: int + :ivar vm_vfs_cache_pressure: Sysctl setting vm.vfs_cache_pressure. + :vartype vm_vfs_cache_pressure: int + """ + + _validation = { + "net_ipv4_tcpkeepalive_intvl": {"maximum": 90, "minimum": 10}, + "net_netfilter_nf_conntrack_max": {"maximum": 2097152, "minimum": 131072}, + "net_netfilter_nf_conntrack_buckets": {"maximum": 524288, "minimum": 65536}, + } + + _attribute_map = { + "net_core_somaxconn": {"key": "netCoreSomaxconn", "type": "int"}, + "net_core_netdev_max_backlog": {"key": "netCoreNetdevMaxBacklog", "type": "int"}, + "net_core_rmem_default": {"key": "netCoreRmemDefault", "type": "int"}, + "net_core_rmem_max": {"key": "netCoreRmemMax", "type": "int"}, + "net_core_wmem_default": {"key": "netCoreWmemDefault", "type": "int"}, + "net_core_wmem_max": {"key": "netCoreWmemMax", "type": "int"}, + "net_core_optmem_max": {"key": "netCoreOptmemMax", "type": "int"}, + "net_ipv4_tcp_max_syn_backlog": {"key": "netIpv4TcpMaxSynBacklog", "type": "int"}, + "net_ipv4_tcp_max_tw_buckets": {"key": "netIpv4TcpMaxTwBuckets", "type": "int"}, + "net_ipv4_tcp_fin_timeout": {"key": "netIpv4TcpFinTimeout", "type": "int"}, + "net_ipv4_tcp_keepalive_time": {"key": 
"netIpv4TcpKeepaliveTime", "type": "int"}, + "net_ipv4_tcp_keepalive_probes": {"key": "netIpv4TcpKeepaliveProbes", "type": "int"}, + "net_ipv4_tcpkeepalive_intvl": {"key": "netIpv4TcpkeepaliveIntvl", "type": "int"}, + "net_ipv4_tcp_tw_reuse": {"key": "netIpv4TcpTwReuse", "type": "bool"}, + "net_ipv4_ip_local_port_range": {"key": "netIpv4IpLocalPortRange", "type": "str"}, + "net_ipv4_neigh_default_gc_thresh1": {"key": "netIpv4NeighDefaultGcThresh1", "type": "int"}, + "net_ipv4_neigh_default_gc_thresh2": {"key": "netIpv4NeighDefaultGcThresh2", "type": "int"}, + "net_ipv4_neigh_default_gc_thresh3": {"key": "netIpv4NeighDefaultGcThresh3", "type": "int"}, + "net_netfilter_nf_conntrack_max": {"key": "netNetfilterNfConntrackMax", "type": "int"}, + "net_netfilter_nf_conntrack_buckets": {"key": "netNetfilterNfConntrackBuckets", "type": "int"}, + "fs_inotify_max_user_watches": {"key": "fsInotifyMaxUserWatches", "type": "int"}, + "fs_file_max": {"key": "fsFileMax", "type": "int"}, + "fs_aio_max_nr": {"key": "fsAioMaxNr", "type": "int"}, + "fs_nr_open": {"key": "fsNrOpen", "type": "int"}, + "kernel_threads_max": {"key": "kernelThreadsMax", "type": "int"}, + "vm_max_map_count": {"key": "vmMaxMapCount", "type": "int"}, + "vm_swappiness": {"key": "vmSwappiness", "type": "int"}, + "vm_vfs_cache_pressure": {"key": "vmVfsCachePressure", "type": "int"}, + } + + def __init__( # pylint: disable=too-many-locals + self, + *, + net_core_somaxconn: Optional[int] = None, + net_core_netdev_max_backlog: Optional[int] = None, + net_core_rmem_default: Optional[int] = None, + net_core_rmem_max: Optional[int] = None, + net_core_wmem_default: Optional[int] = None, + net_core_wmem_max: Optional[int] = None, + net_core_optmem_max: Optional[int] = None, + net_ipv4_tcp_max_syn_backlog: Optional[int] = None, + net_ipv4_tcp_max_tw_buckets: Optional[int] = None, + net_ipv4_tcp_fin_timeout: Optional[int] = None, + net_ipv4_tcp_keepalive_time: Optional[int] = None, + net_ipv4_tcp_keepalive_probes: 
Optional[int] = None, + net_ipv4_tcpkeepalive_intvl: Optional[int] = None, + net_ipv4_tcp_tw_reuse: Optional[bool] = None, + net_ipv4_ip_local_port_range: Optional[str] = None, + net_ipv4_neigh_default_gc_thresh1: Optional[int] = None, + net_ipv4_neigh_default_gc_thresh2: Optional[int] = None, + net_ipv4_neigh_default_gc_thresh3: Optional[int] = None, + net_netfilter_nf_conntrack_max: Optional[int] = None, + net_netfilter_nf_conntrack_buckets: Optional[int] = None, + fs_inotify_max_user_watches: Optional[int] = None, + fs_file_max: Optional[int] = None, + fs_aio_max_nr: Optional[int] = None, + fs_nr_open: Optional[int] = None, + kernel_threads_max: Optional[int] = None, + vm_max_map_count: Optional[int] = None, + vm_swappiness: Optional[int] = None, + vm_vfs_cache_pressure: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword net_core_somaxconn: Sysctl setting net.core.somaxconn. + :paramtype net_core_somaxconn: int + :keyword net_core_netdev_max_backlog: Sysctl setting net.core.netdev_max_backlog. + :paramtype net_core_netdev_max_backlog: int + :keyword net_core_rmem_default: Sysctl setting net.core.rmem_default. + :paramtype net_core_rmem_default: int + :keyword net_core_rmem_max: Sysctl setting net.core.rmem_max. + :paramtype net_core_rmem_max: int + :keyword net_core_wmem_default: Sysctl setting net.core.wmem_default. + :paramtype net_core_wmem_default: int + :keyword net_core_wmem_max: Sysctl setting net.core.wmem_max. + :paramtype net_core_wmem_max: int + :keyword net_core_optmem_max: Sysctl setting net.core.optmem_max. + :paramtype net_core_optmem_max: int + :keyword net_ipv4_tcp_max_syn_backlog: Sysctl setting net.ipv4.tcp_max_syn_backlog. + :paramtype net_ipv4_tcp_max_syn_backlog: int + :keyword net_ipv4_tcp_max_tw_buckets: Sysctl setting net.ipv4.tcp_max_tw_buckets. + :paramtype net_ipv4_tcp_max_tw_buckets: int + :keyword net_ipv4_tcp_fin_timeout: Sysctl setting net.ipv4.tcp_fin_timeout. 
+ :paramtype net_ipv4_tcp_fin_timeout: int + :keyword net_ipv4_tcp_keepalive_time: Sysctl setting net.ipv4.tcp_keepalive_time. + :paramtype net_ipv4_tcp_keepalive_time: int + :keyword net_ipv4_tcp_keepalive_probes: Sysctl setting net.ipv4.tcp_keepalive_probes. + :paramtype net_ipv4_tcp_keepalive_probes: int + :keyword net_ipv4_tcpkeepalive_intvl: Sysctl setting net.ipv4.tcp_keepalive_intvl. + :paramtype net_ipv4_tcpkeepalive_intvl: int + :keyword net_ipv4_tcp_tw_reuse: Sysctl setting net.ipv4.tcp_tw_reuse. + :paramtype net_ipv4_tcp_tw_reuse: bool + :keyword net_ipv4_ip_local_port_range: Sysctl setting net.ipv4.ip_local_port_range. + :paramtype net_ipv4_ip_local_port_range: str + :keyword net_ipv4_neigh_default_gc_thresh1: Sysctl setting net.ipv4.neigh.default.gc_thresh1. + :paramtype net_ipv4_neigh_default_gc_thresh1: int + :keyword net_ipv4_neigh_default_gc_thresh2: Sysctl setting net.ipv4.neigh.default.gc_thresh2. + :paramtype net_ipv4_neigh_default_gc_thresh2: int + :keyword net_ipv4_neigh_default_gc_thresh3: Sysctl setting net.ipv4.neigh.default.gc_thresh3. + :paramtype net_ipv4_neigh_default_gc_thresh3: int + :keyword net_netfilter_nf_conntrack_max: Sysctl setting net.netfilter.nf_conntrack_max. + :paramtype net_netfilter_nf_conntrack_max: int + :keyword net_netfilter_nf_conntrack_buckets: Sysctl setting net.netfilter.nf_conntrack_buckets. + :paramtype net_netfilter_nf_conntrack_buckets: int + :keyword fs_inotify_max_user_watches: Sysctl setting fs.inotify.max_user_watches. + :paramtype fs_inotify_max_user_watches: int + :keyword fs_file_max: Sysctl setting fs.file-max. + :paramtype fs_file_max: int + :keyword fs_aio_max_nr: Sysctl setting fs.aio-max-nr. + :paramtype fs_aio_max_nr: int + :keyword fs_nr_open: Sysctl setting fs.nr_open. + :paramtype fs_nr_open: int + :keyword kernel_threads_max: Sysctl setting kernel.threads-max. + :paramtype kernel_threads_max: int + :keyword vm_max_map_count: Sysctl setting vm.max_map_count. 
+ :paramtype vm_max_map_count: int + :keyword vm_swappiness: Sysctl setting vm.swappiness. + :paramtype vm_swappiness: int + :keyword vm_vfs_cache_pressure: Sysctl setting vm.vfs_cache_pressure. + :paramtype vm_vfs_cache_pressure: int + """ + super().__init__(**kwargs) + self.net_core_somaxconn = net_core_somaxconn + self.net_core_netdev_max_backlog = net_core_netdev_max_backlog + self.net_core_rmem_default = net_core_rmem_default + self.net_core_rmem_max = net_core_rmem_max + self.net_core_wmem_default = net_core_wmem_default + self.net_core_wmem_max = net_core_wmem_max + self.net_core_optmem_max = net_core_optmem_max + self.net_ipv4_tcp_max_syn_backlog = net_ipv4_tcp_max_syn_backlog + self.net_ipv4_tcp_max_tw_buckets = net_ipv4_tcp_max_tw_buckets + self.net_ipv4_tcp_fin_timeout = net_ipv4_tcp_fin_timeout + self.net_ipv4_tcp_keepalive_time = net_ipv4_tcp_keepalive_time + self.net_ipv4_tcp_keepalive_probes = net_ipv4_tcp_keepalive_probes + self.net_ipv4_tcpkeepalive_intvl = net_ipv4_tcpkeepalive_intvl + self.net_ipv4_tcp_tw_reuse = net_ipv4_tcp_tw_reuse + self.net_ipv4_ip_local_port_range = net_ipv4_ip_local_port_range + self.net_ipv4_neigh_default_gc_thresh1 = net_ipv4_neigh_default_gc_thresh1 + self.net_ipv4_neigh_default_gc_thresh2 = net_ipv4_neigh_default_gc_thresh2 + self.net_ipv4_neigh_default_gc_thresh3 = net_ipv4_neigh_default_gc_thresh3 + self.net_netfilter_nf_conntrack_max = net_netfilter_nf_conntrack_max + self.net_netfilter_nf_conntrack_buckets = net_netfilter_nf_conntrack_buckets + self.fs_inotify_max_user_watches = fs_inotify_max_user_watches + self.fs_file_max = fs_file_max + self.fs_aio_max_nr = fs_aio_max_nr + self.fs_nr_open = fs_nr_open + self.kernel_threads_max = kernel_threads_max + self.vm_max_map_count = vm_max_map_count + self.vm_swappiness = vm_swappiness + self.vm_vfs_cache_pressure = vm_vfs_cache_pressure + + +class SystemData(_serialization.Model): + """Metadata pertaining to creation and last modification of the resource. 
+ + :ivar created_by: The identity that created the resource. + :vartype created_by: str + :ivar created_by_type: The type of identity that created the resource. Known values are: + "User", "Application", "ManagedIdentity", and "Key". + :vartype created_by_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.CreatedByType + :ivar created_at: The timestamp of resource creation (UTC). + :vartype created_at: ~datetime.datetime + :ivar last_modified_by: The identity that last modified the resource. + :vartype last_modified_by: str + :ivar last_modified_by_type: The type of identity that last modified the resource. Known values + are: "User", "Application", "ManagedIdentity", and "Key". + :vartype last_modified_by_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.CreatedByType + :ivar last_modified_at: The timestamp of resource last modification (UTC). + :vartype last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + "created_by": {"key": "createdBy", "type": "str"}, + "created_by_type": {"key": "createdByType", "type": "str"}, + "created_at": {"key": "createdAt", "type": "iso-8601"}, + "last_modified_by": {"key": "lastModifiedBy", "type": "str"}, + "last_modified_by_type": {"key": "lastModifiedByType", "type": "str"}, + "last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"}, + } + + def __init__( + self, + *, + created_by: Optional[str] = None, + created_by_type: Optional[Union[str, "_models.CreatedByType"]] = None, + created_at: Optional[datetime.datetime] = None, + last_modified_by: Optional[str] = None, + last_modified_by_type: Optional[Union[str, "_models.CreatedByType"]] = None, + last_modified_at: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """ + :keyword created_by: The identity that created the resource. + :paramtype created_by: str + :keyword created_by_type: The type of identity that created the resource. Known values are: + "User", "Application", "ManagedIdentity", and "Key". 
+ :paramtype created_by_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.CreatedByType + :keyword created_at: The timestamp of resource creation (UTC). + :paramtype created_at: ~datetime.datetime + :keyword last_modified_by: The identity that last modified the resource. + :paramtype last_modified_by: str + :keyword last_modified_by_type: The type of identity that last modified the resource. Known + values are: "User", "Application", "ManagedIdentity", and "Key". + :paramtype last_modified_by_type: str or + ~azure.mgmt.containerservice.v2024_07_01.models.CreatedByType + :keyword last_modified_at: The timestamp of resource last modification (UTC). + :paramtype last_modified_at: ~datetime.datetime + """ + super().__init__(**kwargs) + self.created_by = created_by + self.created_by_type = created_by_type + self.created_at = created_at + self.last_modified_by = last_modified_by + self.last_modified_by_type = last_modified_by_type + self.last_modified_at = last_modified_at + + +class TagsObject(_serialization.Model): + """Tags object for patch operations. + + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + """ + + _attribute_map = { + "tags": {"key": "tags", "type": "{str}"}, + } + + def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None: + """ + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + """ + super().__init__(**kwargs) + self.tags = tags + + +class TimeInWeek(_serialization.Model): + """Time in a week. + + :ivar day: The day of the week. Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", + "Thursday", "Friday", and "Saturday". + :vartype day: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + :ivar hour_slots: Each integer hour represents a time range beginning at 0m after the hour + ending at the next hour (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 + UTC. Specifying [0, 1] means the 00:00 - 02:00 UTC time range. 
+ :vartype hour_slots: list[int] + """ + + _attribute_map = { + "day": {"key": "day", "type": "str"}, + "hour_slots": {"key": "hourSlots", "type": "[int]"}, + } + + def __init__( + self, + *, + day: Optional[Union[str, "_models.WeekDay"]] = None, + hour_slots: Optional[List[int]] = None, + **kwargs: Any + ) -> None: + """ + :keyword day: The day of the week. Known values are: "Sunday", "Monday", "Tuesday", + "Wednesday", "Thursday", "Friday", and "Saturday". + :paramtype day: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + :keyword hour_slots: Each integer hour represents a time range beginning at 0m after the hour + ending at the next hour (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 + UTC. Specifying [0, 1] means the 00:00 - 02:00 UTC time range. + :paramtype hour_slots: list[int] + """ + super().__init__(**kwargs) + self.day = day + self.hour_slots = hour_slots + + +class TimeSpan(_serialization.Model): + """For example, between 2021-05-25T13:00:00Z and 2021-05-25T14:00:00Z. + + :ivar start: The start of a time span. + :vartype start: ~datetime.datetime + :ivar end: The end of a time span. + :vartype end: ~datetime.datetime + """ + + _attribute_map = { + "start": {"key": "start", "type": "iso-8601"}, + "end": {"key": "end", "type": "iso-8601"}, + } + + def __init__( + self, *, start: Optional[datetime.datetime] = None, end: Optional[datetime.datetime] = None, **kwargs: Any + ) -> None: + """ + :keyword start: The start of a time span. + :paramtype start: ~datetime.datetime + :keyword end: The end of a time span. + :paramtype end: ~datetime.datetime + """ + super().__init__(**kwargs) + self.start = start + self.end = end + + +class TrustedAccessRole(_serialization.Model): + """Trusted access role definition. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar source_resource_type: Resource type of Azure resource. 
+ :vartype source_resource_type: str + :ivar name: Name of role, name is unique under a source resource type. + :vartype name: str + :ivar rules: List of rules for the role. This maps to 'rules' property of `Kubernetes Cluster + Role + `_. + :vartype rules: list[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleRule] + """ + + _validation = { + "source_resource_type": {"readonly": True}, + "name": {"readonly": True}, + "rules": {"readonly": True}, + } + + _attribute_map = { + "source_resource_type": {"key": "sourceResourceType", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "rules": {"key": "rules", "type": "[TrustedAccessRoleRule]"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.source_resource_type = None + self.name = None + self.rules = None + + +class TrustedAccessRoleBinding(Resource): + """Defines binding between a resource and role. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: Fully qualified resource ID for the resource. E.g. + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :ivar provisioning_state: The current provisioning state of trusted access role binding. Known + values are: "Canceled", "Deleting", "Failed", "Succeeded", and "Updating". 
+ :vartype provisioning_state: str or + ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBindingProvisioningState + :ivar source_resource_id: The ARM resource ID of source resource that trusted access is + configured for. Required. + :vartype source_resource_id: str + :ivar roles: A list of roles to bind, each item is a resource type qualified role name. For + example: 'Microsoft.MachineLearningServices/workspaces/reader'. Required. + :vartype roles: list[str] + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "source_resource_id": {"required": True}, + "roles": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "source_resource_id": {"key": "properties.sourceResourceId", "type": "str"}, + "roles": {"key": "properties.roles", "type": "[str]"}, + } + + def __init__(self, *, source_resource_id: str, roles: List[str], **kwargs: Any) -> None: + """ + :keyword source_resource_id: The ARM resource ID of source resource that trusted access is + configured for. Required. + :paramtype source_resource_id: str + :keyword roles: A list of roles to bind, each item is a resource type qualified role name. For + example: 'Microsoft.MachineLearningServices/workspaces/reader'. Required. + :paramtype roles: list[str] + """ + super().__init__(**kwargs) + self.provisioning_state = None + self.source_resource_id = source_resource_id + self.roles = roles + + +class TrustedAccessRoleBindingListResult(_serialization.Model): + """List of trusted access role bindings. + + Variables are only populated by the server, and will be ignored when sending a request. 
+ + :ivar value: Role binding list. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :ivar next_link: Link to next page of resources. + :vartype next_link: str + """ + + _validation = { + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[TrustedAccessRoleBinding]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: Optional[List["_models.TrustedAccessRoleBinding"]] = None, **kwargs: Any) -> None: + """ + :keyword value: Role binding list. + :paramtype value: + list[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + """ + super().__init__(**kwargs) + self.value = value + self.next_link = None + + +class TrustedAccessRoleListResult(_serialization.Model): + """List of trusted access roles. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: Role list. + :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRole] + :ivar next_link: Link to next page of resources. + :vartype next_link: str + """ + + _validation = { + "value": {"readonly": True}, + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[TrustedAccessRole]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.value = None + self.next_link = None + + +class TrustedAccessRoleRule(_serialization.Model): + """Rule for trusted access role. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar verbs: List of allowed verbs. + :vartype verbs: list[str] + :ivar api_groups: List of allowed apiGroups. + :vartype api_groups: list[str] + :ivar resources: List of allowed resources. + :vartype resources: list[str] + :ivar resource_names: List of allowed names. 
+ :vartype resource_names: list[str] + :ivar non_resource_ur_ls: List of allowed nonResourceURLs. + :vartype non_resource_ur_ls: list[str] + """ + + _validation = { + "verbs": {"readonly": True}, + "api_groups": {"readonly": True}, + "resources": {"readonly": True}, + "resource_names": {"readonly": True}, + "non_resource_ur_ls": {"readonly": True}, + } + + _attribute_map = { + "verbs": {"key": "verbs", "type": "[str]"}, + "api_groups": {"key": "apiGroups", "type": "[str]"}, + "resources": {"key": "resources", "type": "[str]"}, + "resource_names": {"key": "resourceNames", "type": "[str]"}, + "non_resource_ur_ls": {"key": "nonResourceURLs", "type": "[str]"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.verbs = None + self.api_groups = None + self.resources = None + self.resource_names = None + self.non_resource_ur_ls = None + + +class UpgradeOverrideSettings(_serialization.Model): + """Settings for overrides when upgrading a cluster. + + :ivar force_upgrade: Whether to force upgrade the cluster. Note that this option instructs + upgrade operation to bypass upgrade protections such as checking for deprecated API usage. + Enable this option only with caution. + :vartype force_upgrade: bool + :ivar until: Until when the overrides are effective. Note that this only matches the start time + of an upgrade, and the effectiveness won't change once an upgrade starts even if the ``until`` + expires as upgrade proceeds. This field is not set by default. It must be set for the overrides + to take effect. + :vartype until: ~datetime.datetime + """ + + _attribute_map = { + "force_upgrade": {"key": "forceUpgrade", "type": "bool"}, + "until": {"key": "until", "type": "iso-8601"}, + } + + def __init__( + self, *, force_upgrade: Optional[bool] = None, until: Optional[datetime.datetime] = None, **kwargs: Any + ) -> None: + """ + :keyword force_upgrade: Whether to force upgrade the cluster. 
Note that this option instructs + upgrade operation to bypass upgrade protections such as checking for deprecated API usage. + Enable this option only with caution. + :paramtype force_upgrade: bool + :keyword until: Until when the overrides are effective. Note that this only matches the start + time of an upgrade, and the effectiveness won't change once an upgrade starts even if the + ``until`` expires as upgrade proceeds. This field is not set by default. It must be set for the + overrides to take effect. + :paramtype until: ~datetime.datetime + """ + super().__init__(**kwargs) + self.force_upgrade = force_upgrade + self.until = until + + +class WeeklySchedule(_serialization.Model): + """For schedules like: 'recur every Monday' or 'recur every 3 weeks on Wednesday'. + + All required parameters must be populated in order to send to server. + + :ivar interval_weeks: Specifies the number of weeks between each set of occurrences. Required. + :vartype interval_weeks: int + :ivar day_of_week: Specifies on which day of the week the maintenance occurs. Required. Known + values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". + :vartype day_of_week: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + """ + + _validation = { + "interval_weeks": {"required": True, "maximum": 4, "minimum": 1}, + "day_of_week": {"required": True}, + } + + _attribute_map = { + "interval_weeks": {"key": "intervalWeeks", "type": "int"}, + "day_of_week": {"key": "dayOfWeek", "type": "str"}, + } + + def __init__(self, *, interval_weeks: int, day_of_week: Union[str, "_models.WeekDay"], **kwargs: Any) -> None: + """ + :keyword interval_weeks: Specifies the number of weeks between each set of occurrences. + Required. + :paramtype interval_weeks: int + :keyword day_of_week: Specifies on which day of the week the maintenance occurs. Required. + Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and + "Saturday". 
+ :paramtype day_of_week: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + """ + super().__init__(**kwargs) + self.interval_weeks = interval_weeks + self.day_of_week = day_of_week + + +class WindowsGmsaProfile(_serialization.Model): + """Windows gMSA Profile in the managed cluster. + + :ivar enabled: Specifies whether to enable Windows gMSA in the managed cluster. + :vartype enabled: bool + :ivar dns_server: Specifies the DNS server for Windows gMSA. :code:`
`:code:`
` Set it to + empty if you have configured the DNS server in the vnet which is used to create the managed + cluster. + :vartype dns_server: str + :ivar root_domain_name: Specifies the root domain name for Windows gMSA. + :code:`
`:code:`
` Set it to empty if you have configured the DNS server in the vnet + which is used to create the managed cluster. + :vartype root_domain_name: str + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + "dns_server": {"key": "dnsServer", "type": "str"}, + "root_domain_name": {"key": "rootDomainName", "type": "str"}, + } + + def __init__( + self, + *, + enabled: Optional[bool] = None, + dns_server: Optional[str] = None, + root_domain_name: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword enabled: Specifies whether to enable Windows gMSA in the managed cluster. + :paramtype enabled: bool + :keyword dns_server: Specifies the DNS server for Windows gMSA. :code:`
`:code:`
` Set it + to empty if you have configured the DNS server in the vnet which is used to create the managed + cluster. + :paramtype dns_server: str + :keyword root_domain_name: Specifies the root domain name for Windows gMSA. + :code:`
`:code:`
` Set it to empty if you have configured the DNS server in the vnet + which is used to create the managed cluster. + :paramtype root_domain_name: str + """ + super().__init__(**kwargs) + self.enabled = enabled + self.dns_server = dns_server + self.root_domain_name = root_domain_name diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/__init__.py new file mode 100644 index 00000000000..d59e9e8a9f1 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/__init__.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operations import Operations +from ._managed_clusters_operations import ManagedClustersOperations +from ._maintenance_configurations_operations import MaintenanceConfigurationsOperations +from ._agent_pools_operations import AgentPoolsOperations +from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations +from ._private_link_resources_operations import PrivateLinkResourcesOperations +from ._resolve_private_link_service_id_operations import ResolvePrivateLinkServiceIdOperations +from ._snapshots_operations import SnapshotsOperations +from ._trusted_access_role_bindings_operations import TrustedAccessRoleBindingsOperations +from ._trusted_access_roles_operations import TrustedAccessRolesOperations +from ._machines_operations import MachinesOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "Operations", + "ManagedClustersOperations", + "MaintenanceConfigurationsOperations", + "AgentPoolsOperations", + "PrivateEndpointConnectionsOperations", + "PrivateLinkResourcesOperations", + "ResolvePrivateLinkServiceIdOperations", + "SnapshotsOperations", + "TrustedAccessRoleBindingsOperations", + "TrustedAccessRolesOperations", + "MachinesOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_agent_pools_operations.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_agent_pools_operations.py new file mode 100644 index 00000000000..105b291dc8e --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_agent_pools_operations.py @@ -0,0 +1,1508 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, Type, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. 
import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_abort_latest_operation_request( + resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclusters/{resourceName}/agentPools/{agentPoolName}/abort", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "agentPoolName": _SERIALIZER.url( + "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + 
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_request( + resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = 
kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "agentPoolName": _SERIALIZER.url( + "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_or_update_request( + resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + 
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "agentPoolName": _SERIALIZER.url( + "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + 
"resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "agentPoolName": _SERIALIZER.url( + "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_upgrade_profile_request( + resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "agentPoolName": _SERIALIZER.url( + "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$" + ), + } + + _url: str = 
_url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_machines_request( + resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/deleteMachines", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "agentPoolName": _SERIALIZER.url( + "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = 
_SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_available_agent_pool_versions_request( # pylint: disable=name-too-long + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_upgrade_node_image_version_request( + resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "agentPoolName": _SERIALIZER.url( + "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +class AgentPoolsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`agent_pools` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + def _abort_latest_operation_initial( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_abort_latest_operation_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["location"] = self._deserialize("str", response.headers.get("location")) + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_abort_latest_operation( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Aborts last operation running on agent pool. + + Aborts the currently running operation on the agent pool. The Agent Pool will be moved to a + Canceling state and eventually to a Canceled state when cancellation finishes. If the operation + completes before cancellation can take place, a 409 error code is returned. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. 
+ :type agent_pool_name: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._abort_latest_operation_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> Iterable["_models.AgentPool"]: + """Gets a list of 
agent pools in the specified managed cluster. + + Gets a list of agent pools in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An iterator like instance of either AgentPool or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.AgentPoolListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = 
self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("AgentPoolListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> _models.AgentPool: + """Gets the specified managed cluster agent pool. + + Gets the specified managed cluster agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. 
+ :type agent_pool_name: str + :return: AgentPool or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("AgentPool", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _create_or_update_initial( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + parameters: Union[_models.AgentPool, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 
409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "AgentPool") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create_or_update( + self, + 
resource_group_name: str, + resource_name: str, + agent_pool_name: str, + parameters: _models.AgentPool, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AgentPool]: + """Creates or updates an agent pool in the specified managed cluster. + + Creates or updates an agent pool in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param parameters: The agent pool to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either AgentPool or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AgentPool]: + """Creates or updates an agent pool in the specified managed cluster. + + Creates or updates an agent pool in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. 
+ :type agent_pool_name: str + :param parameters: The agent pool to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either AgentPool or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + parameters: Union[_models.AgentPool, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.AgentPool]: + """Creates or updates an agent pool in the specified managed cluster. + + Creates or updates an agent pool in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param parameters: The agent pool to create or update. Is either a AgentPool type or a + IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool or IO[bytes] + :return: An instance of LROPoller that returns either AgentPool or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("AgentPool", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.AgentPool].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + 
client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.AgentPool]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = 
response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes an agent pool in the specified managed cluster. + + Deletes an agent pool in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, 
None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def get_upgrade_profile( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> _models.AgentPoolUpgradeProfile: + """Gets the upgrade profile for an agent pool. + + Gets the upgrade profile for an agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. 
+ :type agent_pool_name: str + :return: AgentPoolUpgradeProfile or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.AgentPoolUpgradeProfile] = kwargs.pop("cls", None) + + _request = build_get_upgrade_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("AgentPoolUpgradeProfile", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _delete_machines_initial( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, 
Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(machines, (IOBase, bytes)): + _content = machines + else: + _json = self._serialize.body(machines, "AgentPoolDeleteMachinesParameter") + + _request = build_delete_machines_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Location"] = 
self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_delete_machines( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + machines: _models.AgentPoolDeleteMachinesParameter, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[None]: + """Deletes specific machines in an agent pool. + + Deletes specific machines in an agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param machines: A list of machines from the agent pool to be deleted. Required. + :type machines: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolDeleteMachinesParameter + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_delete_machines( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + machines: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[None]: + """Deletes specific machines in an agent pool. + + Deletes specific machines in an agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param machines: A list of machines from the agent pool to be deleted. Required. + :type machines: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_delete_machines( + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]], + **kwargs: Any + ) -> LROPoller[None]: + """Deletes specific machines in an agent pool. + + Deletes specific machines in an agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param machines: A list of machines from the agent pool to be deleted. Is either a + AgentPoolDeleteMachinesParameter type or a IO[bytes] type. Required. 
+ :type machines: + ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolDeleteMachinesParameter or IO[bytes] + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_machines_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + machines=machines, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, 
 get_long_running_output, polling_method) # type: ignore
+
+    @distributed_trace
+    def get_available_agent_pool_versions(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> _models.AgentPoolAvailableVersions:
+        """Gets a list of supported Kubernetes versions for the specified agent pool.
+
+        See `supported Kubernetes versions
+        <https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about
+        the version lifecycle.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: AgentPoolAvailableVersions or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolAvailableVersions
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01"))
+        cls: ClsType[_models.AgentPoolAvailableVersions] = kwargs.pop("cls", None)
+
+        _request = build_get_available_agent_pool_versions_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("AgentPoolAvailableVersions", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _upgrade_node_image_version_initial( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_upgrade_node_image_version_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + 
response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_upgrade_node_image_version( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> LROPoller[_models.AgentPool]: + """Upgrades the node image version of an agent pool to the latest. + + Upgrading the node image version of an agent pool applies the newest OS and runtime updates to + the nodes. AKS provides one new image per week with the latest updates. For more details on + node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. 
+ :type agent_pool_name: str + :return: An instance of LROPoller that returns either AgentPool or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._upgrade_node_image_version_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = self._deserialize("AgentPool", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.AgentPool].from_continuation_token( + polling_method=polling_method, + 
continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.AgentPool]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_machines_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_machines_operations.py new file mode 100644 index 00000000000..0e8b467df4d --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_machines_operations.py @@ -0,0 +1,307 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, Iterable, Optional, Type, TypeVar +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. 
import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_request( + resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/machines", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "agentPoolName": _SERIALIZER.url( + "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return 
HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_request( + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + machine_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/machines/{machineName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "agentPoolName": _SERIALIZER.url( + "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$" + ), + "machineName": _SERIALIZER.url( + "machine_name", machine_name, "str", pattern=r"^[a-zA-Z0-9][-_a-zA-Z0-9]{0,39}$" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class MachinesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
+ + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`machines` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + ) -> Iterable["_models.Machine"]: + """Gets a list of machines in the specified agent pool. + + Gets a list of machines in the specified agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. 
+ :type agent_pool_name: str + :return: An iterator like instance of either Machine or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Machine] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MachineListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("MachineListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return 
deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get( + self, resource_group_name: str, resource_name: str, agent_pool_name: str, machine_name: str, **kwargs: Any + ) -> _models.Machine: + """Get a specific machine in the specified agent pool. + + Get a specific machine in the specified agent pool. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param agent_pool_name: The name of the agent pool. Required. + :type agent_pool_name: str + :param machine_name: host name of the machine. Required. 
+ :type machine_name: str + :return: Machine or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Machine + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.Machine] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + agent_pool_name=agent_pool_name, + machine_name=machine_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Machine", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_maintenance_configurations_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_maintenance_configurations_operations.py new file 
mode 100644 index 00000000000..0f092d0fda9 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_maintenance_configurations_operations.py @@ -0,0 +1,581 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, Callable, Dict, IO, Iterable, Optional, Type, TypeVar, Union, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. 
import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_by_managed_cluster_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_request( + resource_group_name: str, resource_name: str, config_name: 
str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "configName": _SERIALIZER.url("config_name", config_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_or_update_request( + resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", 
"application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "configName": _SERIALIZER.url("config_name", config_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + 
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "configName": _SERIALIZER.url("config_name", config_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +class MaintenanceConfigurationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`maintenance_configurations` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list_by_managed_cluster( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> Iterable["_models.MaintenanceConfiguration"]: + """Gets a list of maintenance configurations in the specified managed cluster. + + Gets a list of maintenance configurations in the specified managed cluster. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An iterator like instance of either MaintenanceConfiguration or the result of + cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MaintenanceConfigurationListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_by_managed_cluster_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = 
"GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("MaintenanceConfigurationListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get( + self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any + ) -> _models.MaintenanceConfiguration: + """Gets the specified maintenance configuration of a managed cluster. + + Gets the specified maintenance configuration of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. 
+ :type config_name: str + :return: MaintenanceConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + config_name=config_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_or_update( + self, + resource_group_name: str, + resource_name: str, + config_name: str, + parameters: _models.MaintenanceConfiguration, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.MaintenanceConfiguration: + """Creates or 
updates a maintenance configuration in the specified managed cluster. + + Creates or updates a maintenance configuration in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. + :type config_name: str + :param parameters: The maintenance configuration to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: MaintenanceConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + resource_group_name: str, + resource_name: str, + config_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.MaintenanceConfiguration: + """Creates or updates a maintenance configuration in the specified managed cluster. + + Creates or updates a maintenance configuration in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. + :type config_name: str + :param parameters: The maintenance configuration to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: MaintenanceConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_update( + self, + resource_group_name: str, + resource_name: str, + config_name: str, + parameters: Union[_models.MaintenanceConfiguration, IO[bytes]], + **kwargs: Any + ) -> _models.MaintenanceConfiguration: + """Creates or updates a maintenance configuration in the specified managed cluster. + + Creates or updates a maintenance configuration in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. + :type config_name: str + :param parameters: The maintenance configuration to create or update. Is either a + MaintenanceConfiguration type or a IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration or + IO[bytes] + :return: MaintenanceConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "MaintenanceConfiguration") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + config_name=config_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, 
error_format=ARMErrorFormat) + + deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any + ) -> None: + """Deletes a maintenance configuration. + + Deletes a maintenance configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param config_name: The name of the maintenance configuration. Required. + :type config_name: str + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + config_name=config_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_managed_clusters_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_managed_clusters_operations.py new file mode 100644 index 00000000000..72647cc95c1 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_managed_clusters_operations.py @@ -0,0 +1,3832 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, Type, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_kubernetes_versions_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/kubernetesVersions", + ) # pylint: disable=line-too-long + path_format_arguments = { + 
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "location": _SERIALIZER.url("location", location, "str", min_length=1), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters" + ) + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_upgrade_profile_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_access_profile_request( + resource_group_name: str, resource_name: str, role_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "roleName": _SERIALIZER.url("role_name", role_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_cluster_admin_credentials_request( # pylint: disable=name-too-long + resource_group_name: str, + resource_name: str, + subscription_id: str, + *, + server_fqdn: Optional[str] = 
None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if server_fqdn is not None: + _params["server-fqdn"] = _SERIALIZER.query("server_fqdn", server_fqdn, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_cluster_user_credentials_request( # pylint: disable=name-too-long + resource_group_name: str, + resource_name: str, + subscription_id: str, + *, + server_fqdn: Optional[str] = None, + format: Optional[Union[str, _models.Format]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + 
accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if server_fqdn is not None: + _params["server-fqdn"] = _SERIALIZER.query("server_fqdn", server_fqdn, "str") + if format is not None: + _params["format"] = _SERIALIZER.query("format", format, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_cluster_monitoring_user_credentials_request( # pylint: disable=name-too-long + resource_group_name: str, + resource_name: str, + subscription_id: str, + *, + server_fqdn: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if server_fqdn is not None: + _params["server-fqdn"] = _SERIALIZER.query("server_fqdn", server_fqdn, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_request(resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": 
_SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_or_update_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = 
_SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_update_tags_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: 
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_reset_service_principal_profile_request( # pylint: disable=name-too-long + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_reset_aad_profile_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + 
"resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_rotate_cluster_certificates_request( # pylint: disable=name-too-long + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) 
# type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_abort_latest_operation_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclusters/{resourceName}/abort", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_rotate_service_account_signing_keys_request( # pylint: disable=name-too-long + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateServiceAccountSigningKeys", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_stop_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop", + ) # pylint: disable=line-too-long + path_format_arguments = { + 
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_start_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", 
api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_run_command_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/runCommand", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_command_result_request( + resource_group_name: str, resource_name: str, command_id: str, subscription_id: str, 
**kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/commandResults/{commandId}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "commandId": _SERIALIZER.url("command_id", command_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_outbound_network_dependencies_endpoints_request( # pylint: disable=name-too-long + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/outboundNetworkDependenciesEndpoints", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_mesh_revision_profiles_request( # pylint: disable=name-too-long + location: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/meshRevisionProfiles", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "location": _SERIALIZER.url("location", location, "str", min_length=1), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, 
"str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_mesh_revision_profile_request( + location: str, mode: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/meshRevisionProfiles/{mode}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "location": _SERIALIZER.url("location", location, "str", min_length=1), + "mode": _SERIALIZER.url( + "mode", + mode, + "str", + max_length=24, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_mesh_upgrade_profiles_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( 
+ "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/meshUpgradeProfiles", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_mesh_upgrade_profile_request( + resource_group_name: str, resource_name: str, mode: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/meshUpgradeProfiles/{mode}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + 
resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "mode": _SERIALIZER.url( + "mode", + mode, + "str", + max_length=24, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class ManagedClustersOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`managed_clusters` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.KubernetesVersionListResult: + """Gets a list of supported Kubernetes versions in the specified subscription. + + Contains extra metadata on the version, including supported patch versions, capabilities, + available upgrades, and details on preview status of the version. + + :param location: The name of the Azure region. Required. 
+ :type location: str + :return: KubernetesVersionListResult or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersionListResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.KubernetesVersionListResult] = kwargs.pop("cls", None) + + _request = build_list_kubernetes_versions_request( + location=location, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("KubernetesVersionListResult", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list(self, **kwargs: Any) -> Iterable["_models.ManagedCluster"]: + """Gets a list of managed clusters in the specified subscription. + + Gets a list of managed clusters in the specified subscription. 
+ + :return: An iterator like instance of either ManagedCluster or the result of cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("ManagedClusterListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = 
prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.ManagedCluster"]: + """Lists managed clusters in the specified subscription and resource group. + + Lists managed clusters in the specified subscription and resource group. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :return: An iterator like instance of either ManagedCluster or the result of cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + 
params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("ManagedClusterListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get_upgrade_profile( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> _models.ManagedClusterUpgradeProfile: + """Gets the upgrade profile of a managed cluster. + + Gets the upgrade profile of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: ManagedClusterUpgradeProfile or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterUpgradeProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedClusterUpgradeProfile] = kwargs.pop("cls", None) + + _request = build_get_upgrade_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("ManagedClusterUpgradeProfile", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_access_profile( + self, resource_group_name: str, resource_name: str, role_name: str, **kwargs: Any + ) -> _models.ManagedClusterAccessProfile: + """Gets an access profile of a managed cluster. + + **WARNING**\\ : This API will be deprecated. 
Instead use `ListClusterUserCredentials + `_ or + `ListClusterAdminCredentials + `_ . + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param role_name: The name of the role for managed cluster accessProfile resource. Required. + :type role_name: str + :return: ManagedClusterAccessProfile or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAccessProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedClusterAccessProfile] = kwargs.pop("cls", None) + + _request = build_get_access_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + role_name=role_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = 
self._deserialize("ManagedClusterAccessProfile", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_cluster_admin_credentials( + self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any + ) -> _models.CredentialResults: + """Lists the admin credentials of a managed cluster. + + Lists the admin credentials of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. + :type server_fqdn: str + :return: CredentialResults or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) + + _request = build_list_cluster_admin_credentials_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + server_fqdn=server_fqdn, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = 
self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_cluster_user_credentials( + self, + resource_group_name: str, + resource_name: str, + server_fqdn: Optional[str] = None, + format: Optional[Union[str, _models.Format]] = None, + **kwargs: Any + ) -> _models.CredentialResults: + """Lists the user credentials of a managed cluster. + + Lists the user credentials of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. + :type server_fqdn: str + :param format: Only apply to AAD clusters, specifies the format of returned kubeconfig. Format + 'azure' will return azure auth-provider kubeconfig; format 'exec' will return exec format + kubeconfig, which requires kubelogin binary in the path. Known values are: "azure", "exec", and + "exec". Default value is None. 
+ :type format: str or ~azure.mgmt.containerservice.v2024_07_01.models.Format + :return: CredentialResults or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) + + _request = build_list_cluster_user_credentials_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + server_fqdn=server_fqdn, + format=format, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_cluster_monitoring_user_credentials( + self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any + ) -> _models.CredentialResults: + """Lists 
the cluster monitoring user credentials of a managed cluster. + + Lists the cluster monitoring user credentials of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. + :type server_fqdn: str + :return: CredentialResults or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) + + _request = build_list_cluster_monitoring_user_credentials_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + server_fqdn=server_fqdn, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + 
deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.ManagedCluster: + """Gets a managed cluster. + + Gets a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: ManagedCluster or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise 
HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _create_or_update_initial( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedCluster, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "ManagedCluster") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + try: + 
response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.ManagedCluster, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.ManagedCluster]: + """Creates or updates a managed cluster. + + Creates or updates a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The managed cluster to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.ManagedCluster]: + """Creates or updates a managed cluster. + + Creates or updates a managed cluster. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The managed cluster to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedCluster, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.ManagedCluster]: + """Creates or updates a managed cluster. + + Creates or updates a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The managed cluster to create or update. Is either a ManagedCluster type or + a IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster or IO[bytes] + :return: An instance of LROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.ManagedCluster].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + 
client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.ManagedCluster]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _update_tags_initial( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.TagsObject, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "TagsObject") + + _request = build_update_tags_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + try: + response.read() # Load the body in memory and close the socket + except 
(StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.TagsObject, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.ManagedCluster]: + """Updates tags on a managed cluster. + + Updates tags on a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.ManagedCluster]: + """Updates tags on a managed cluster. + + Updates tags on a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.TagsObject, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.ManagedCluster]: + """Updates tags on a managed cluster. + + Updates tags on a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Is either + a TagsObject type or a IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject or IO[bytes] + :return: An instance of LROPoller that returns either ManagedCluster or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_tags_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.ManagedCluster].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + 
deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.ManagedCluster]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, 
deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> LROPoller[None]: + """Deletes a managed cluster. + + Deletes a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + 
polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + def _reset_service_principal_profile_initial( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "ManagedClusterServicePrincipalProfile") + + _request = build_reset_service_principal_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response 
+ + if response.status_code not in [200, 202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_reset_service_principal_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.ManagedClusterServicePrincipalProfile, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[None]: + """Reset the Service Principal Profile of a managed cluster. + + This action cannot be performed on a cluster that is not using a service principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The service principal profile to set on the managed cluster. Required. + :type parameters: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_reset_service_principal_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[None]: + """Reset the Service Principal Profile of a managed cluster. + + This action cannot be performed on a cluster that is not using a service principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The service principal profile to set on the managed cluster. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_reset_service_principal_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]], + **kwargs: Any + ) -> LROPoller[None]: + """Reset the Service Principal Profile of a managed cluster. + + This action cannot be performed on a cluster that is not using a service principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :param parameters: The service principal profile to set on the managed cluster. Is either a + ManagedClusterServicePrincipalProfile type or a IO[bytes] type. Required. + :type parameters: + ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile or + IO[bytes] + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._reset_service_principal_profile_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return 
LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + def _reset_aad_profile_initial( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "ManagedClusterAADProfile") + + _request = build_reset_aad_profile_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if 
response.status_code not in [200, 202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_reset_aad_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.ManagedClusterAADProfile, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[None]: + """Reset the AAD Profile of a managed cluster. + + **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory + integration `_ to update your cluster with AKS-managed Azure + AD. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The AAD profile to set on the Managed Cluster. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_reset_aad_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[None]: + """Reset the AAD Profile of a managed cluster. + + **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory + integration `_ to update your cluster with AKS-managed Azure + AD. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The AAD profile to set on the Managed Cluster. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_reset_aad_profile( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]], + **kwargs: Any + ) -> LROPoller[None]: + """Reset the AAD Profile of a managed cluster. + + **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory + integration `_ to update your cluster with AKS-managed Azure + AD. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The AAD profile to set on the Managed Cluster. Is either a + ManagedClusterAADProfile type or a IO[bytes] type. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile or + IO[bytes] + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._reset_aad_profile_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + 
polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + def _rotate_cluster_certificates_initial( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_rotate_cluster_certificates_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] 
= self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_rotate_cluster_certificates( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Rotates the certificates of a managed cluster. + + See `Certificate rotation `_ for + more details about rotating managed cluster certificates. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._rotate_cluster_certificates_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, 
None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + def _abort_latest_operation_initial( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_abort_latest_operation_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, 
StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["location"] = self._deserialize("str", response.headers.get("location")) + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_abort_latest_operation( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Aborts last operation running on managed cluster. + + Aborts the currently running operation on the managed cluster. The Managed Cluster will be + moved to a Canceling state and eventually to a Canceled state when cancellation finishes. If + the operation completes before cancellation can take place, a 409 error code is returned. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._abort_latest_operation_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + def _rotate_service_account_signing_keys_initial( # pylint: disable=name-too-long + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: 
    @distributed_trace
    def begin_rotate_service_account_signing_keys(  # pylint: disable=name-too-long
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> LROPoller[None]:
        """Rotates the service account signing keys of a managed cluster.

        Rotates the service account signing keys of a managed cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence: explicit kwarg > caller-supplied query param > client-pinned
        # version > the default baked into this generated API surface.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # Fresh operation: issue the initial request. cls=lambda returns the raw
            # PipelineResponse so the poller can read status code and headers itself.
            raw_result = self._rotate_service_account_signing_keys_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed body so the connection is released before polling starts.
            raw_result.http_response.read()  # type: ignore
        # error_map was already consumed by the initial call; do not forward it to ARMPolling.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # The operation produces no payload; only invoke the user's callback if given.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        if polling is True:
            # Generated LRO option: the terminal state is read from the Location header.
            polling_method: PollingMethod = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Resume a previously started operation rather than issuing a new request.
            return LROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    @distributed_trace
    def begin_stop(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> LROPoller[None]:
        """Stops a Managed Cluster.

        This can only be performed on Azure Virtual Machine Scale set backed clusters. Stopping a
        cluster stops the control plane and agent nodes entirely, while maintaining all object and
        cluster state. A cluster does not accrue charges while it is stopped. See `stopping a cluster
        `_ for more details about stopping a
        cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence: explicit kwarg > query param > client-pinned version > default.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # Fresh operation: cls=lambda preserves the raw PipelineResponse for the poller.
            raw_result = self._stop_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed body so the connection is released before polling starts.
            raw_result.http_response.read()  # type: ignore
        # Already consumed by _stop_initial; must not leak into ARMPolling kwargs.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # No payload on success; forward to the user's callback when supplied.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        if polling is True:
            polling_method: PollingMethod = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Resume an in-flight stop operation from its continuation token.
            return LROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    @distributed_trace
    def begin_start(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> LROPoller[None]:
        """Starts a previously stopped Managed Cluster.

        See `starting a cluster `_ for more
        details about starting a cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence: explicit kwarg > query param > client-pinned version > default.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # Fresh operation: cls=lambda preserves the raw PipelineResponse for the poller.
            raw_result = self._start_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed body so the connection is released before polling starts.
            raw_result.http_response.read()  # type: ignore
        # Already consumed by _start_initial; must not leak into ARMPolling kwargs.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # No payload on success; forward to the user's callback when supplied.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        if polling is True:
            polling_method: PollingMethod = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Resume an in-flight start operation from its continuation token.
            return LROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+ api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_run_command( + self, + resource_group_name: str, + resource_name: str, + request_payload: _models.RunCommandRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.RunCommandResult]: + """Submits a command to run against the Managed Cluster. + + AKS will create a pod to run the command. This is primarily useful for private clusters. For + more information see `AKS Run Command + `_. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param request_payload: The run command request. Required. 
+ :type request_payload: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns either RunCommandResult or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_run_command( + self, + resource_group_name: str, + resource_name: str, + request_payload: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.RunCommandResult]: + """Submits a command to run against the Managed Cluster. + + AKS will create a pod to run the command. This is primarily useful for private clusters. For + more information see `AKS Run Command + `_. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param request_payload: The run command request. Required. + :type request_payload: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
    @distributed_trace
    def begin_run_command(
        self,
        resource_group_name: str,
        resource_name: str,
        request_payload: Union[_models.RunCommandRequest, IO[bytes]],
        **kwargs: Any
    ) -> LROPoller[_models.RunCommandResult]:
        """Submits a command to run against the Managed Cluster.

        AKS will create a pod to run the command. This is primarily useful for private clusters. For
        more information see `AKS Run Command
        `_.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param request_payload: The run command request. Is either a RunCommandRequest type or a
         IO[bytes] type. Required.
        :type request_payload: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandRequest or
         IO[bytes]
        :return: An instance of LROPoller that returns either RunCommandResult or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence: explicit kwarg > query param > client-pinned version > default.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01"))
        # Content-Type may come from the kwarg or a caller-provided header; the initial
        # call decides between JSON serialization and raw-bytes upload based on payload type.
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.RunCommandResult] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # Fresh operation: cls=lambda preserves the raw PipelineResponse for the poller.
            raw_result = self._run_command_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                request_payload=request_payload,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed body so the connection is released before polling starts.
            raw_result.http_response.read()  # type: ignore
        # Already consumed by _run_command_initial; must not leak into ARMPolling kwargs.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Unlike the stop/start LROs, run-command returns a typed result body.
            deserialized = self._deserialize("RunCommandResult", pipeline_response.http_response)
            if cls:
                return cls(pipeline_response, deserialized, {})  # type: ignore
            return deserialized

        if polling is True:
            polling_method: PollingMethod = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Resume an in-flight run-command operation from its continuation token.
            return LROPoller[_models.RunCommandResult].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[_models.RunCommandResult](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )
    @distributed_trace
    def list_outbound_network_dependencies_endpoints(  # pylint: disable=name-too-long
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> Iterable["_models.OutboundEnvironmentEndpoint"]:
        """Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the
        specified managed cluster.

        Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the
        specified managed cluster. The operation returns properties of each egress endpoint.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: An iterator like instance of either OutboundEnvironmentEndpoint or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.OutboundEnvironmentEndpoint]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence: explicit kwarg > query param > client-pinned version > default.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01"))
        cls: ClsType[_models.OutboundEnvironmentEndpointCollection] = kwargs.pop("cls", None)

        # Map well-known status codes to typed exceptions; callers may extend/override.
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the canonical list request; subsequent pages: follow next_link.
            if not next_link:

                _request = build_list_outbound_network_dependencies_endpoints_request(
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    headers=_headers,
                    params=_params,
                )
                _request.url = self._client.format_url(_request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                # Re-quote query values from next_link, then force the client's api-version
                # so paging never mixes API versions mid-iteration.
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                _request.url = self._client.format_url(_request.url)
                _request.method = "GET"
            return _request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, page items).
            deserialized = self._deserialize("OutboundEnvironmentEndpointCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Execute one page request through the client's pipeline.
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazy paging: no request is issued until the returned iterable is consumed.
        return ItemPaged(get_next, extract_data)
+ :type location: str + :return: An iterator like instance of either MeshRevisionProfile or the result of cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MeshRevisionProfileList] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_mesh_revision_profiles_request( + location=location, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("MeshRevisionProfileList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, 
    @distributed_trace
    def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: Any) -> _models.MeshRevisionProfile:
        """Gets a mesh revision profile for a specified mesh in the specified location.

        Contains extra metadata on the revision, including supported revisions, cluster compatibility
        and available upgrades.

        :param location: The name of the Azure region. Required.
        :type location: str
        :param mode: The mode of the mesh. Required.
        :type mode: str
        :return: MeshRevisionProfile or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known status codes to typed exceptions; callers may extend/override.
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence: explicit kwarg > query param > client-pinned version > default.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01"))
        cls: ClsType[_models.MeshRevisionProfile] = kwargs.pop("cls", None)

        _request = build_get_mesh_revision_profile_request(
            location=location,
            mode=mode,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        # Simple GET: buffer the whole body (no streaming needed for a single resource).
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("MeshRevisionProfile", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An iterator like instance of either MeshUpgradeProfile or the result of cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.MeshUpgradeProfileList] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_mesh_upgrade_profiles_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + 
    @distributed_trace
    def get_mesh_upgrade_profile(
        self, resource_group_name: str, resource_name: str, mode: str, **kwargs: Any
    ) -> _models.MeshUpgradeProfile:
        """Gets available upgrades for a service mesh in a cluster.

        Gets available upgrades for a service mesh in a cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param mode: The mode of the mesh. Required.
        :type mode: str
        :return: MeshUpgradeProfile or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known status codes to typed exceptions; callers may extend/override.
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence: explicit kwarg > query param > client-pinned version > default.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01"))
        cls: ClsType[_models.MeshUpgradeProfile] = kwargs.pop("cls", None)

        _request = build_get_mesh_upgrade_profile_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            mode=mode,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        # Simple GET: buffer the whole body (no streaming needed for a single resource).
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("MeshUpgradeProfile", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
/dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_operations.py @@ -0,0 +1,155 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, Iterable, Optional, Type, TypeVar +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. 
import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop("template_url", "/providers/Microsoft.ContainerService/operations") + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class Operations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`operations` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list(self, **kwargs: Any) -> Iterable["_models.OperationValue"]: + """Gets a list of operations. + + Gets a list of operations. + + :return: An iterator like instance of either OperationValue or the result of cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.OperationValue] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + 
_next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("OperationListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. 
+ +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_endpoint_connections_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_endpoint_connections_operations.py new file mode 100644 index 00000000000..8a8b7d422d9 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_endpoint_connections_operations.py @@ -0,0 +1,628 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, Callable, Dict, IO, Iterator, Optional, Type, TypeVar, Union, cast, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections", + ) # pylint: disable=line-too-long + path_format_arguments = { + 
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_request( + resource_group_name: str, + resource_name: str, + private_endpoint_connection_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "privateEndpointConnectionName": _SERIALIZER.url( + 
"private_endpoint_connection_name", private_endpoint_connection_name, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_update_request( + resource_group_name: str, + resource_name: str, + private_endpoint_connection_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "privateEndpointConnectionName": _SERIALIZER.url( + "private_endpoint_connection_name", private_endpoint_connection_name, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: str, + resource_name: str, + private_endpoint_connection_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "privateEndpointConnectionName": _SERIALIZER.url( + "private_endpoint_connection_name", private_endpoint_connection_name, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, 
headers=_headers, **kwargs) + + +class PrivateEndpointConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`private_endpoint_connections` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> _models.PrivateEndpointConnectionListResult: + """Gets a list of private endpoint connections in the specified managed cluster. + + To learn more about private clusters, see: + https://docs.microsoft.com/azure/aks/private-clusters. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: PrivateEndpointConnectionListResult or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnectionListResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None) + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get( + self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Gets the specified private endpoint connection. 
+ + To learn more about private clusters, see: + https://docs.microsoft.com/azure/aks/private-clusters. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, 
error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update( + self, + resource_group_name: str, + resource_name: str, + private_endpoint_connection_name: str, + parameters: _models.PrivateEndpointConnection, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Updates a private endpoint connection. + + Updates a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param parameters: The updated private endpoint connection. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update( + self, + resource_group_name: str, + resource_name: str, + private_endpoint_connection_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Updates a private endpoint connection. + + Updates a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param parameters: The updated private endpoint connection. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update( + self, + resource_group_name: str, + resource_name: str, + private_endpoint_connection_name: str, + parameters: Union[_models.PrivateEndpointConnection, IO[bytes]], + **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Updates a private endpoint connection. + + Updates a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param parameters: The updated private endpoint connection. Is either a + PrivateEndpointConnection type or a IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection or + IO[bytes] + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "PrivateEndpointConnection") + + _request = build_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise 
HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _delete_initial( + self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = response.stream_download(self._client._pipeline, 
decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes a private endpoint connection. + + Deletes a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + private_endpoint_connection_name=private_endpoint_connection_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, 
None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_link_resources_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_link_resources_operations.py new file mode 100644 index 00000000000..8c42443c082 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_link_resources_operations.py @@ -0,0 +1,158 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, Optional, Type, TypeVar + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + 
"resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class PrivateLinkResourcesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`private_link_resources` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> _models.PrivateLinkResourcesListResult: + """Gets a list of private link resources in the specified managed cluster. + + To learn more about private clusters, see: + https://docs.microsoft.com/azure/aks/private-clusters. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: PrivateLinkResourcesListResult or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResourcesListResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None) + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_resolve_private_link_service_id_operations.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_resolve_private_link_service_id_operations.py new file mode 100644 index 00000000000..71b08f43f19 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_resolve_private_link_service_id_operations.py @@ -0,0 +1,239 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. 
import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_post_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resolvePrivateLinkServiceId", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", 
accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +class ResolvePrivateLinkServiceIdOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`resolve_private_link_service_id` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @overload + def post( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.PrivateLinkResource, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PrivateLinkResource: + """Gets the private link service ID for the specified managed cluster. + + Gets the private link service ID for the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters required in order to resolve a private link service ID. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: PrivateLinkResource or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def post( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PrivateLinkResource: + """Gets the private link service ID for the specified managed cluster. + + Gets the private link service ID for the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters required in order to resolve a private link service ID. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: PrivateLinkResource or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def post( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.PrivateLinkResource, IO[bytes]], + **kwargs: Any + ) -> _models.PrivateLinkResource: + """Gets the private link service ID for the specified managed cluster. + + Gets the private link service ID for the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters required in order to resolve a private link service ID. 
Is either + a PrivateLinkResource type or a IO[bytes] type. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource or + IO[bytes] + :return: PrivateLinkResource or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateLinkResource] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "PrivateLinkResource") + + _request = build_post_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, 
error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateLinkResource", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_snapshots_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_snapshots_operations.py new file mode 100644 index 00000000000..ecc1585ad84 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_snapshots_operations.py @@ -0,0 +1,819 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, Callable, Dict, IO, Iterable, Optional, Type, TypeVar, Union, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. 
import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/snapshots") + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots", + ) # pylint: 
disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_request(resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, 
params=_params, headers=_headers, **kwargs) + + +def build_create_or_update_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_update_tags_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", + ) # pylint: disable=line-too-long + 
path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +class SnapshotsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`snapshots` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list(self, **kwargs: Any) -> Iterable["_models.Snapshot"]: + """Gets a list of snapshots in the specified subscription. + + Gets a list of snapshots in the specified subscription. 
+ + :return: An iterator like instance of either Snapshot or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("SnapshotListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = 
False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Snapshot"]: + """Lists snapshots in the specified subscription and resource group. + + Lists snapshots in the specified subscription and resource group. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :return: An iterator like instance of either Snapshot or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + 
else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("SnapshotListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.Snapshot: + """Gets a snapshot. + + Gets a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Snapshot", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.Snapshot, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Snapshot: + """Creates or updates a snapshot. + + Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. 
The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The snapshot to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Snapshot: + """Creates or updates a snapshot. + + Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The snapshot to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_update( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.Snapshot, IO[bytes]], + **kwargs: Any + ) -> _models.Snapshot: + """Creates or updates a snapshot. + + Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: The snapshot to create or update. Is either a Snapshot type or a IO[bytes] + type. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot or IO[bytes] + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "Snapshot") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Snapshot", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: _models.TagsObject, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Snapshot: + """Updates tags on a snapshot. + + Updates tags on a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update snapshot Tags operation. Required. + :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Snapshot: + """Updates tags on a snapshot. + + Updates tags on a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update snapshot Tags operation. Required. 
+ :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_tags( + self, + resource_group_name: str, + resource_name: str, + parameters: Union[_models.TagsObject, IO[bytes]], + **kwargs: Any + ) -> _models.Snapshot: + """Updates tags on a snapshot. + + Updates tags on a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param parameters: Parameters supplied to the Update snapshot Tags operation. Is either a + TagsObject type or a IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject or IO[bytes] + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "TagsObject") + + _request = build_update_tags_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Snapshot", pipeline_response.http_response) + + if cls: + 
return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> None: + """Deletes a snapshot. + + Deletes a snapshot. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore diff --git 
a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_role_bindings_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_role_bindings_operations.py new file mode 100644 index 00000000000..70dd5f8d839 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_role_bindings_operations.py @@ -0,0 +1,745 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import sys +from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, Type, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. 
import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_request( + resource_group_name: str, + resource_name: str, + 
trusted_access_role_binding_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings/{trustedAccessRoleBindingName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "trustedAccessRoleBindingName": _SERIALIZER.url( + "trusted_access_role_binding_name", + trusted_access_role_binding_name, + "str", + max_length=24, + min_length=1, + pattern=r"^([A-Za-z0-9-])+$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_or_update_request( + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: 
str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings/{trustedAccessRoleBindingName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "trustedAccessRoleBindingName": _SERIALIZER.url( + "trusted_access_role_binding_name", + trusted_access_role_binding_name, + "str", + max_length=24, + min_length=1, + pattern=r"^([A-Za-z0-9-])+$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings/{trustedAccessRoleBindingName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "trustedAccessRoleBindingName": _SERIALIZER.url( + "trusted_access_role_binding_name", + trusted_access_role_binding_name, + "str", + max_length=24, + min_length=1, + pattern=r"^([A-Za-z0-9-])+$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +class TrustedAccessRoleBindingsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`trusted_access_role_bindings` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> Iterable["_models.TrustedAccessRoleBinding"]: + """List trusted access role bindings. + + List trusted access role bindings. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An iterator like instance of either TrustedAccessRoleBinding or the result of + cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.TrustedAccessRoleBindingListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + 
subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("TrustedAccessRoleBindingListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get( + self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any + ) -> _models.TrustedAccessRoleBinding: + """Get a trusted access role binding. + + Get a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. + :type trusted_access_role_binding_name: str + :return: TrustedAccessRoleBinding or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: 
ignore + + def _create_or_update_initial( + self, + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(trusted_access_role_binding, (IOBase, bytes)): + _content = trusted_access_role_binding + else: + _json = self._serialize.body(trusted_access_role_binding, "TrustedAccessRoleBinding") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + try: + response.read() # Load the body in memory and close the socket + 
except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + trusted_access_role_binding: _models.TrustedAccessRoleBinding, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.TrustedAccessRoleBinding]: + """Create or update a trusted access role binding. + + Create or update a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. + :type trusted_access_role_binding_name: str + :param trusted_access_role_binding: A trusted access role binding. Required. + :type trusted_access_role_binding: + ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of LROPoller that returns either TrustedAccessRoleBinding or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + trusted_access_role_binding: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.TrustedAccessRoleBinding]: + """Create or update a trusted access role binding. + + Create or update a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. + :type trusted_access_role_binding_name: str + :param trusted_access_role_binding: A trusted access role binding. Required. + :type trusted_access_role_binding: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of LROPoller that returns either TrustedAccessRoleBinding or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + trusted_access_role_binding_name: str, + trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.TrustedAccessRoleBinding]: + """Create or update a trusted access role binding. + + Create or update a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. + :type trusted_access_role_binding_name: str + :param trusted_access_role_binding: A trusted access role binding. Is either a + TrustedAccessRoleBinding type or a IO[bytes] type. Required. 
+ :type trusted_access_role_binding: + ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding or IO[bytes] + :return: An instance of LROPoller that returns either TrustedAccessRoleBinding or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + trusted_access_role_binding=trusted_access_role_binding, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling 
+ if cont_token: + return LROPoller[_models.TrustedAccessRoleBinding].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.TrustedAccessRoleBinding]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial( + self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Delete a trusted access role binding. + + Delete a trusted access role binding. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param trusted_access_role_binding_name: The name of trusted access role binding. Required. 
+ :type trusted_access_role_binding_name: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + trusted_access_role_binding_name=trusted_access_role_binding_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_roles_operations.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_roles_operations.py new file mode 100644 index 00000000000..c90fd5c90c7 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_roles_operations.py @@ -0,0 +1,168 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, Iterable, Optional, Type, TypeVar +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. 
import models as _models +from ..._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "location": _SERIALIZER.url("location", location, "str", min_length=1), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class TrustedAccessRolesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :attr:`trusted_access_roles` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list(self, location: str, **kwargs: Any) -> Iterable["_models.TrustedAccessRole"]: + """List supported trusted access roles. + + List supported trusted access roles. + + :param location: The name of the Azure region. Required. + :type location: str + :return: An iterator like instance of either TrustedAccessRole or the result of cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRole] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_request( + location=location, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + 
_next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("TrustedAccessRoleListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/py.typed b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/py.typed new file mode 100644 index 00000000000..e5aff4f83af --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. 
\ No newline at end of file diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/__init__.py new file mode 100644 index 00000000000..05bac22d8ec --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/__init__.py @@ -0,0 +1,20 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._source_control_configuration_client import SourceControlConfigurationClient +__all__ = ['SourceControlConfigurationClient'] + +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass + +from ._version import VERSION + +__version__ = VERSION diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_configuration.py new file mode 100644 index 00000000000..ece4e8f4f3e --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_configuration.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + +class SourceControlConfigurationClientConfiguration(Configuration): + """Configuration for SourceControlConfigurationClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + """ + + def __init__( + self, + credential: "TokenCredential", + subscription_id: str, + **kwargs: Any + ): + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + super(SourceControlConfigurationClientConfiguration, self).__init__(**kwargs) + + self.credential = credential + self.subscription_id = subscription_id + self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) + kwargs.setdefault('sdk_moniker', 'azure-mgmt-kubernetesconfiguration/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ): + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or 
policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_serialization.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_serialization.py new file mode 100644 index 00000000000..a00658b1fc1 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_serialization.py @@ -0,0 +1,2012 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# pylint: skip-file +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + TypeVar, + MutableMapping, + Type, + List, + Mapping, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore + +from azure.core.exceptions import DeserializationError, SerializationError, raise_with_traceback +from azure.core.serialization import NULL as AzureCoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +ModelType = TypeVar("ModelType", bound="Model") +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. 
+ + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. + data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attemp(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attemp(data) + if success: + return json_result + # If i'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. 
+ _LOGGER.critical("Wasn't XML not JSON, failing") + raise_with_traceback(DeserializationError, "XML is invalid") + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. + Headers will tested for "content-type" + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +try: + basestring # type: ignore + unicode_str = unicode # type: ignore +except NameError: + basestring = str + unicode_str = str + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + + +class UTC(datetime.tzinfo): + """Time Zone info for handling UTC""" + + def utcoffset(self, dt): + """UTF offset for UTC is 0.""" + return datetime.timedelta(0) + + def tzname(self, dt): + """Timestamp representation.""" + return "Z" + + def dst(self, dt): + """No daylight saving for UTC.""" + return datetime.timedelta(hours=1) + + +try: + from datetime import timezone as _FixedOffset # type: ignore +except ImportError: # Python 2.7 + + class _FixedOffset(datetime.tzinfo): # type: ignore + """Fixed offset in minutes east from UTC. 
+ Copy/pasted from Python doc + :param datetime.timedelta offset: offset in timedelta format + """ + + def __init__(self, offset): + self.__offset = offset + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return str(self.__offset.total_seconds() / 3600) + + def __repr__(self): + return "".format(self.tzname(None)) + + def dst(self, dt): + return datetime.timedelta(0) + + def __getinitargs__(self): + return (self.__offset,) + + +try: + from datetime import timezone + + TZ_UTC = timezone.utc +except ImportError: + TZ_UTC = UTC() # type: ignore + +_FLATTEN = re.compile(r"(? None: + self.additional_properties: Dict[str, Any] = {} + for k in kwargs: + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node.""" + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, 
keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to azure from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs) + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[ + [str, Dict[str, Any], Any], Any + ] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param function key_transformer: A key transformer function. 
+ :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) + + @classmethod + def from_dict( + cls: Type[ModelType], + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> ModelType: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param str content_type: JSON by default, set application/xml if XML. 
+ :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + Remove the polymorphic key from the initial data. + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. 
Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 
+ + :param str key: A key string from the generated code + """ + return key.replace("\\.", ".") + + +class Serializer(object): + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, Type[ModelType]]]=None): + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, Type[ModelType]] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize(self, target_obj, data_type=None, **kwargs): + """Serialize data into a string according to type. + + :param target_obj: The data to be serialized. 
+ :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises: SerializationError if serialization fails. + """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() + try: + attributes = target_obj._attribute_map + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. 
Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. + if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = unicode_str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be 
serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise_with_traceback(SerializationError, msg, err) + else: + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) + except DeserializationError as err: + raise_with_traceback(SerializationError, "Unable to build a model: " + str(err), err) + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. 
+ :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :keyword bool skip_quote: Whether to skip quote the serialized result. + Defaults to False. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get('skip_quote', False) + return str(self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs)) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. 
+ :raises: ValueError if data is None + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :param bool required: Whether it's essential that the data not be + empty or None + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is AzureCoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + elif data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, data.__class__) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." 
+ raise_with_traceback(SerializationError, msg.format(data, data_type), err) + else: + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. + + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param data: Object to be serialized. + :param str data_type: Type of object in the iterable. + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + return eval(data_type)(data) # nosec + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param data: Object to be serialized. + :rtype: str + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + else: + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. 
+ - is_xml bool : If set, serialize as XML + + :param list attr: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param bool required: Whether the objects in the iterable must + not be None or empty. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + :keyword bool do_quote: Whether to quote the serialized result of each iterable element. + Defaults to False. + :rtype: list, str + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get('do_quote', False): + serialized = [ + '' if s is None else quote(str(s), safe='') + for s + in serialized + ] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = 
str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :param bool required: Whether the objects in the dictionary must + not be None or empty. + :rtype: dict + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is unicode_str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + elif obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) + + @staticmethod + def serialize_bytearray(attr, **kwargs): + """Serialize bytearray into base-64 string. + + :param attr: Object to be serialized. 
+ :rtype: str + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): + """Serialize str into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): + """Serialize Decimal object to float. + + :param attr: Object to be serialized. + :rtype: float + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): + """Serialize long (Py2) or int (Py3). + + :param attr: Object to be serialized. + :rtype: int/long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: TypeError if format invalid. 
+ """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError: + raise TypeError("RFC1123 object must be valid Datetime object.") + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: SerializationError if format invalid. + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(SerializationError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + @staticmethod + def serialize_unix(attr, **kwargs): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. 
+ :rtype: int + :raises: SerializationError if format invalid + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError: + raise TypeError("Unix time object must be valid Datetime object.") + + +def rest_key_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(List[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + # https://github.com/Azure/msrest-for-python/issues/197 + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." 
in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + # https://github.com/Azure/msrest-for-python/issues/197 + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key.""" + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. 
+ + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = 
_extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + else: + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + else: # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not a itertype, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) + return children[0] + + +class Deserializer(object): + """Response object model deserializer. + + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, Type[ModelType]]]=None): + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: Dict[str, Type[ModelType]] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. 
+ """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, basestring): + return self.deserialize_data(data, response) + elif isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None: + return data + try: + attributes = response._attribute_map # type: ignore + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... 
+ if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise_with_traceback(DeserializationError, msg, err) + else: + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. 
If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + """ + if target is None: + return None, None + + if isinstance(target, basestring): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + """ + try: + return self(target_obj, data, content_type=content_type) + except: + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. 
+ + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param raw_data: Data to be processed. + :param content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (basestring, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param response: The response model class. + :param d_attrs: The deserialized response attributes. 
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [k for k, v in response._validation.items() if v.get("readonly")] + const = [k for k, v in response._validation.items() if v.get("constant")] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) + + def deserialize_data(self, data, data_type): + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. 
+ """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise_with_traceback(DeserializationError, msg, err) + else: + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. 
+ :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :rtype: dict + :raises: TypeError if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, basestring): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + else: + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :rtype: str, int, float or bool + :raises: TypeError if string format is not valid. 
+ """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node
is empty string. + return "" + else: + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + elif isinstance(attr, basestring): + if attr.lower() in ["true", "1"]: + return True + elif attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + else: + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. 
+ # https://github.com/Azure/azure-rest-api-specs/issues/141 + try: + return list(enum_obj.__members__.values())[data] + except IndexError: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :rtype: Decimal + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(attr) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise_with_traceback(DeserializationError, msg, err) + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). 
+ + :param str attr: response string to be deserialized. + :rtype: long or int + :raises: ValueError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :rtype: TimeDelta + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise_with_traceback(DeserializationError, msg, err) + else: + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :rtype: Date + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. 
+ + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise_with_traceback(DeserializationError, msg, err) + else: + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() # type: ignore + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise_with_traceback(DeserializationError, msg, err) + else: + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param int attr: Object to be serialized. 
+ :rtype: Datetime + :raises: DeserializationError if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) # type: ignore + try: + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." + raise_with_traceback(DeserializationError, msg, err) + else: + return date_obj diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_source_control_configuration_client.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_source_control_configuration_client.py new file mode 100644 index 00000000000..fc4b01f2b29 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_source_control_configuration_client.py @@ -0,0 +1,515 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, Optional, TYPE_CHECKING + +from azure.mgmt.core import ARMPipelineClient +from azure.profiles import KnownProfiles, ProfileDefinition +from azure.profiles.multiapiclient import MultiApiClientMixin + +from ._configuration import SourceControlConfigurationClientConfiguration +from ._serialization import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + +class _SDKClient(object): + def __init__(self, *args, **kwargs): + """This is a fake class to support current implemetation of MultiApiClientMixin." 
+ Will be removed in final version of multiapi azure-core based client
+ """
+ pass
+
+class SourceControlConfigurationClient(MultiApiClientMixin, _SDKClient):
+ """KubernetesConfiguration Client.
+
+ This client contains multiple API versions, to help you deal with all of the Azure clouds
+ (Azure Stack, Azure Government, Azure China, etc.).
+ By default, it uses the latest API version available on public Azure.
+ For production, you should stick to a particular api-version and/or profile.
+ The profile sets a mapping between an operation group and its API version.
+ The api-version parameter sets the default API version if the operation
+ group is not described in the profile.
+
+ :param credential: Credential needed for the client to connect to Azure. Required.
+ :type credential: ~azure.core.credentials.TokenCredential
+ :param subscription_id: The ID of the target subscription. Required.
+ :type subscription_id: str
+ :param api_version: API version to use if no profile is provided, or if missing in profile.
+ :type api_version: str
+ :param base_url: Service URL
+ :type base_url: str
+ :param profile: A profile definition, from KnownProfiles to dict.
+ :type profile: azure.profiles.KnownProfiles
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+ """ + + DEFAULT_API_VERSION = '2023-05-01' + _PROFILE_TAG = "azure.mgmt.kubernetesconfiguration.SourceControlConfigurationClient" + LATEST_PROFILE = ProfileDefinition({ + _PROFILE_TAG: { + None: DEFAULT_API_VERSION, + 'cluster_extension_type': '2022-01-15-preview', + 'cluster_extension_types': '2022-01-15-preview', + 'extension_type_versions': '2022-01-15-preview', + 'location_extension_types': '2022-01-15-preview', + 'private_endpoint_connections': '2022-04-02-preview', + 'private_link_resources': '2022-04-02-preview', + 'private_link_scopes': '2022-04-02-preview', + }}, + _PROFILE_TAG + " latest" + ) + + def __init__( + self, + credential: "TokenCredential", + subscription_id: str, + api_version: Optional[str]=None, + base_url: str = "https://management.azure.com", + profile: KnownProfiles=KnownProfiles.default, + **kwargs: Any + ): + if api_version: + kwargs.setdefault('api_version', api_version) + self._config = SourceControlConfigurationClientConfiguration(credential, subscription_id, **kwargs) + self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + super(SourceControlConfigurationClient, self).__init__( + api_version=api_version, + profile=profile + ) + + @classmethod + def _models_dict(cls, api_version): + return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} + + @classmethod + def models(cls, api_version=DEFAULT_API_VERSION): + """Module depends on the API version: + + * 2020-07-01-preview: :mod:`v2020_07_01_preview.models` + * 2020-10-01-preview: :mod:`v2020_10_01_preview.models` + * 2021-03-01: :mod:`v2021_03_01.models` + * 2021-05-01-preview: :mod:`v2021_05_01_preview.models` + * 2021-09-01: :mod:`v2021_09_01.models` + * 2021-11-01-preview: :mod:`v2021_11_01_preview.models` + * 2022-01-01-preview: :mod:`v2022_01_01_preview.models` + * 2022-01-15-preview: :mod:`v2022_01_15_preview.models` + * 2022-03-01: :mod:`v2022_03_01.models` + * 2022-04-02-preview: :mod:`v2022_04_02_preview.models` 
+ * 2022-07-01: :mod:`v2022_07_01.models` + * 2022-11-01: :mod:`v2022_11_01.models` + * 2023-05-01: :mod:`v2023_05_01.models` + """ + if api_version == '2020-07-01-preview': + from .v2020_07_01_preview import models + return models + elif api_version == '2020-10-01-preview': + from .v2020_10_01_preview import models + return models + elif api_version == '2021-03-01': + from .v2021_03_01 import models + return models + elif api_version == '2021-05-01-preview': + from .v2021_05_01_preview import models + return models + elif api_version == '2021-09-01': + from .v2021_09_01 import models + return models + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview import models + return models + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview import models + return models + elif api_version == '2022-01-15-preview': + from .v2022_01_15_preview import models + return models + elif api_version == '2022-03-01': + from .v2022_03_01 import models + return models + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview import models + return models + elif api_version == '2022-07-01': + from .v2022_07_01 import models + return models + elif api_version == '2022-11-01': + from .v2022_11_01 import models + return models + elif api_version == '2023-05-01': + from .v2023_05_01 import models + return models + raise ValueError("API version {} is not available".format(api_version)) + + @property + def cluster_extension_type(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`ClusterExtensionTypeOperations` + * 2021-11-01-preview: :class:`ClusterExtensionTypeOperations` + * 2022-01-01-preview: :class:`ClusterExtensionTypeOperations` + * 2022-01-15-preview: :class:`ClusterExtensionTypeOperations` + """ + api_version = self._get_api_version('cluster_extension_type') + if api_version == '2021-05-01-preview': + from .v2021_05_01_preview.operations import ClusterExtensionTypeOperations as OperationClass + elif 
api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import ClusterExtensionTypeOperations as OperationClass + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview.operations import ClusterExtensionTypeOperations as OperationClass + elif api_version == '2022-01-15-preview': + from .v2022_01_15_preview.operations import ClusterExtensionTypeOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'cluster_extension_type'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def cluster_extension_types(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`ClusterExtensionTypesOperations` + * 2021-11-01-preview: :class:`ClusterExtensionTypesOperations` + * 2022-01-01-preview: :class:`ClusterExtensionTypesOperations` + * 2022-01-15-preview: :class:`ClusterExtensionTypesOperations` + """ + api_version = self._get_api_version('cluster_extension_types') + if api_version == '2021-05-01-preview': + from .v2021_05_01_preview.operations import ClusterExtensionTypesOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import ClusterExtensionTypesOperations as OperationClass + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview.operations import ClusterExtensionTypesOperations as OperationClass + elif api_version == '2022-01-15-preview': + from .v2022_01_15_preview.operations import ClusterExtensionTypesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'cluster_extension_types'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version)), api_version) + + @property + def extension_type_versions(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`ExtensionTypeVersionsOperations` + * 2021-11-01-preview: :class:`ExtensionTypeVersionsOperations` + * 2022-01-01-preview: :class:`ExtensionTypeVersionsOperations` + * 2022-01-15-preview: :class:`ExtensionTypeVersionsOperations` + """ + api_version = self._get_api_version('extension_type_versions') + if api_version == '2021-05-01-preview': + from .v2021_05_01_preview.operations import ExtensionTypeVersionsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import ExtensionTypeVersionsOperations as OperationClass + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview.operations import ExtensionTypeVersionsOperations as OperationClass + elif api_version == '2022-01-15-preview': + from .v2022_01_15_preview.operations import ExtensionTypeVersionsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'extension_type_versions'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def extensions(self): + """Instance depends on the API version: + + * 2020-07-01-preview: :class:`ExtensionsOperations` + * 2021-05-01-preview: :class:`ExtensionsOperations` + * 2021-09-01: :class:`ExtensionsOperations` + * 2021-11-01-preview: :class:`ExtensionsOperations` + * 2022-01-01-preview: :class:`ExtensionsOperations` + * 2022-03-01: :class:`ExtensionsOperations` + * 2022-04-02-preview: :class:`ExtensionsOperations` + * 2022-07-01: :class:`ExtensionsOperations` + * 2022-11-01: :class:`ExtensionsOperations` + * 2023-05-01: :class:`ExtensionsOperations` + """ + api_version = self._get_api_version('extensions') 
+ if api_version == '2020-07-01-preview': + from .v2020_07_01_preview.operations import ExtensionsOperations as OperationClass + elif api_version == '2021-05-01-preview': + from .v2021_05_01_preview.operations import ExtensionsOperations as OperationClass + elif api_version == '2021-09-01': + from .v2021_09_01.operations import ExtensionsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import ExtensionsOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import ExtensionsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'extensions'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def flux_config_operation_status(self): + """Instance depends on the API version: + + * 2021-11-01-preview: :class:`FluxConfigOperationStatusOperations` + * 2022-01-01-preview: :class:`FluxConfigOperationStatusOperations` + * 2022-03-01: :class:`FluxConfigOperationStatusOperations` + * 2022-07-01: :class:`FluxConfigOperationStatusOperations` + * 2022-11-01: :class:`FluxConfigOperationStatusOperations` + * 2023-05-01: :class:`FluxConfigOperationStatusOperations` + """ 
+ api_version = self._get_api_version('flux_config_operation_status') + if api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import FluxConfigOperationStatusOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'flux_config_operation_status'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def flux_configurations(self): + """Instance depends on the API version: + + * 2021-11-01-preview: :class:`FluxConfigurationsOperations` + * 2022-01-01-preview: :class:`FluxConfigurationsOperations` + * 2022-03-01: :class:`FluxConfigurationsOperations` + * 2022-07-01: :class:`FluxConfigurationsOperations` + * 2022-11-01: :class:`FluxConfigurationsOperations` + * 2023-05-01: :class:`FluxConfigurationsOperations` + """ + api_version = self._get_api_version('flux_configurations') + if api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2022-03-01': + 
from .v2022_03_01.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import FluxConfigurationsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'flux_configurations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def location_extension_types(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`LocationExtensionTypesOperations` + * 2021-11-01-preview: :class:`LocationExtensionTypesOperations` + * 2022-01-01-preview: :class:`LocationExtensionTypesOperations` + * 2022-01-15-preview: :class:`LocationExtensionTypesOperations` + """ + api_version = self._get_api_version('location_extension_types') + if api_version == '2021-05-01-preview': + from .v2021_05_01_preview.operations import LocationExtensionTypesOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import LocationExtensionTypesOperations as OperationClass + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview.operations import LocationExtensionTypesOperations as OperationClass + elif api_version == '2022-01-15-preview': + from .v2022_01_15_preview.operations import LocationExtensionTypesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'location_extension_types'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def operation_status(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`OperationStatusOperations` + * 2021-09-01: :class:`OperationStatusOperations` + * 2021-11-01-preview: :class:`OperationStatusOperations` + * 2022-01-01-preview: :class:`OperationStatusOperations` + * 2022-03-01: :class:`OperationStatusOperations` + * 2022-04-02-preview: :class:`OperationStatusOperations` + * 2022-07-01: :class:`OperationStatusOperations` + * 2022-11-01: :class:`OperationStatusOperations` + * 2023-05-01: :class:`OperationStatusOperations` + """ + api_version = self._get_api_version('operation_status') + if api_version == '2021-05-01-preview': + from .v2021_05_01_preview.operations import OperationStatusOperations as OperationClass + elif api_version == '2021-09-01': + from .v2021_09_01.operations import OperationStatusOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import OperationStatusOperations as OperationClass + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview.operations import OperationStatusOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import OperationStatusOperations as OperationClass + elif api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import OperationStatusOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import OperationStatusOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import OperationStatusOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import OperationStatusOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'operation_status'".format(api_version)) + 
self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def operations(self): + """Instance depends on the API version: + + * 2020-07-01-preview: :class:`Operations` + * 2020-10-01-preview: :class:`Operations` + * 2021-03-01: :class:`Operations` + * 2021-05-01-preview: :class:`Operations` + * 2021-09-01: :class:`Operations` + * 2021-11-01-preview: :class:`Operations` + * 2022-01-01-preview: :class:`Operations` + * 2022-03-01: :class:`Operations` + * 2022-07-01: :class:`Operations` + * 2022-11-01: :class:`Operations` + * 2023-05-01: :class:`Operations` + """ + api_version = self._get_api_version('operations') + if api_version == '2020-07-01-preview': + from .v2020_07_01_preview.operations import Operations as OperationClass + elif api_version == '2020-10-01-preview': + from .v2020_10_01_preview.operations import Operations as OperationClass + elif api_version == '2021-03-01': + from .v2021_03_01.operations import Operations as OperationClass + elif api_version == '2021-05-01-preview': + from .v2021_05_01_preview.operations import Operations as OperationClass + elif api_version == '2021-09-01': + from .v2021_09_01.operations import Operations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import Operations as OperationClass + elif api_version == '2022-01-01-preview': + from .v2022_01_01_preview.operations import Operations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import Operations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import Operations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import Operations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import Operations as OperationClass + else: + raise 
ValueError("API version {} does not have operation group 'operations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_endpoint_connections(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`PrivateEndpointConnectionsOperations` + """ + api_version = self._get_api_version('private_endpoint_connections') + if api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_link_resources(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`PrivateLinkResourcesOperations` + """ + api_version = self._get_api_version('private_link_resources') + if api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import PrivateLinkResourcesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_link_scopes(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`PrivateLinkScopesOperations` + """ + api_version = self._get_api_version('private_link_scopes') + if api_version == '2022-04-02-preview': + from .v2022_04_02_preview.operations import 
PrivateLinkScopesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'private_link_scopes'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def source_control_configurations(self): + """Instance depends on the API version: + + * 2020-07-01-preview: :class:`SourceControlConfigurationsOperations` + * 2020-10-01-preview: :class:`SourceControlConfigurationsOperations` + * 2021-03-01: :class:`SourceControlConfigurationsOperations` + * 2021-05-01-preview: :class:`SourceControlConfigurationsOperations` + * 2021-11-01-preview: :class:`SourceControlConfigurationsOperations` + * 2022-01-01-preview: :class:`SourceControlConfigurationsOperations` + * 2022-03-01: :class:`SourceControlConfigurationsOperations` + * 2022-07-01: :class:`SourceControlConfigurationsOperations` + * 2022-11-01: :class:`SourceControlConfigurationsOperations` + * 2023-05-01: :class:`SourceControlConfigurationsOperations` + """ + api_version = self._get_api_version('source_control_configurations') + if api_version == '2020-07-01-preview': + from .v2020_07_01_preview.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2020-10-01-preview': + from .v2020_10_01_preview.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2021-03-01': + from .v2021_03_01.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2021-05-01-preview': + from .v2021_05_01_preview.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from .v2021_11_01_preview.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2022-01-01-preview': + from 
.v2022_01_01_preview.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2022-03-01': + from .v2022_03_01.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2022-07-01': + from .v2022_07_01.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2022-11-01': + from .v2022_11_01.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2023-05-01': + from .v2023_05_01.operations import SourceControlConfigurationsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'source_control_configurations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + def close(self): + self._client.close() + def __enter__(self): + self._client.__enter__() + return self + def __exit__(self, *exc_details): + self._client.__exit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_version.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_version.py new file mode 100644 index 00000000000..21c32050402 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/_version.py @@ -0,0 +1,8 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +VERSION = "3.1.0" diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/__init__.py new file mode 100644 index 00000000000..ba52c91a7ba --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._source_control_configuration_client import SourceControlConfigurationClient +__all__ = ['SourceControlConfigurationClient'] diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/_configuration.py new file mode 100644 index 00000000000..f1676e683a7 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/_configuration.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy + +from .._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + +class SourceControlConfigurationClientConfiguration(Configuration): + """Configuration for SourceControlConfigurationClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + **kwargs: Any + ) -> None: + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + super(SourceControlConfigurationClientConfiguration, self).__init__(**kwargs) + + self.credential = credential + self.subscription_id = subscription_id + self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) + kwargs.setdefault('sdk_moniker', 'azure-mgmt-kubernetesconfiguration/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = 
kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/_source_control_configuration_client.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/_source_control_configuration_client.py new file mode 100644 index 00000000000..26722aeb3d6 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/aio/_source_control_configuration_client.py @@ -0,0 +1,515 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, Optional, TYPE_CHECKING + +from azure.mgmt.core import AsyncARMPipelineClient +from azure.profiles import KnownProfiles, ProfileDefinition +from azure.profiles.multiapiclient import MultiApiClientMixin + +from .._serialization import Deserializer, Serializer +from ._configuration import SourceControlConfigurationClientConfiguration + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + +class _SDKClient(object): + def __init__(self, *args, **kwargs): + """This is a fake class to support current implemetation of MultiApiClientMixin." + Will be removed in final version of multiapi azure-core based client + """ + pass + +class SourceControlConfigurationClient(MultiApiClientMixin, _SDKClient): + """KubernetesConfiguration Client. + + This ready contains multiple API versions, to help you deal with all of the Azure clouds + (Azure Stack, Azure Government, Azure China, etc.). + By default, it uses the latest API version available on public Azure. + For production, you should stick to a particular api-version and/or profile. + The profile sets a mapping between an operation group and its API version. + The api-version parameter sets the default API version if the operation + group is not described in the profile. + + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + :param api_version: API version to use if no profile is provided, or if missing in profile. + :type api_version: str + :param base_url: Service URL + :type base_url: str + :param profile: A profile definition, from KnownProfiles to dict. 
+ :type profile: azure.profiles.KnownProfiles + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + """ + + DEFAULT_API_VERSION = '2023-05-01' + _PROFILE_TAG = "azure.mgmt.kubernetesconfiguration.SourceControlConfigurationClient" + LATEST_PROFILE = ProfileDefinition({ + _PROFILE_TAG: { + None: DEFAULT_API_VERSION, + 'cluster_extension_type': '2022-01-15-preview', + 'cluster_extension_types': '2022-01-15-preview', + 'extension_type_versions': '2022-01-15-preview', + 'location_extension_types': '2022-01-15-preview', + 'private_endpoint_connections': '2022-04-02-preview', + 'private_link_resources': '2022-04-02-preview', + 'private_link_scopes': '2022-04-02-preview', + }}, + _PROFILE_TAG + " latest" + ) + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + api_version: Optional[str] = None, + base_url: str = "https://management.azure.com", + profile: KnownProfiles = KnownProfiles.default, + **kwargs: Any + ) -> None: + if api_version: + kwargs.setdefault('api_version', api_version) + self._config = SourceControlConfigurationClientConfiguration(credential, subscription_id, **kwargs) + self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + super(SourceControlConfigurationClient, self).__init__( + api_version=api_version, + profile=profile + ) + + @classmethod + def _models_dict(cls, api_version): + return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} + + @classmethod + def models(cls, api_version=DEFAULT_API_VERSION): + """Module depends on the API version: + + * 2020-07-01-preview: :mod:`v2020_07_01_preview.models` + * 2020-10-01-preview: :mod:`v2020_10_01_preview.models` + * 2021-03-01: :mod:`v2021_03_01.models` + * 2021-05-01-preview: :mod:`v2021_05_01_preview.models` + * 2021-09-01: :mod:`v2021_09_01.models` + * 2021-11-01-preview: :mod:`v2021_11_01_preview.models` + * 
2022-01-01-preview: :mod:`v2022_01_01_preview.models` + * 2022-01-15-preview: :mod:`v2022_01_15_preview.models` + * 2022-03-01: :mod:`v2022_03_01.models` + * 2022-04-02-preview: :mod:`v2022_04_02_preview.models` + * 2022-07-01: :mod:`v2022_07_01.models` + * 2022-11-01: :mod:`v2022_11_01.models` + * 2023-05-01: :mod:`v2023_05_01.models` + """ + if api_version == '2020-07-01-preview': + from ..v2020_07_01_preview import models + return models + elif api_version == '2020-10-01-preview': + from ..v2020_10_01_preview import models + return models + elif api_version == '2021-03-01': + from ..v2021_03_01 import models + return models + elif api_version == '2021-05-01-preview': + from ..v2021_05_01_preview import models + return models + elif api_version == '2021-09-01': + from ..v2021_09_01 import models + return models + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview import models + return models + elif api_version == '2022-01-01-preview': + from ..v2022_01_01_preview import models + return models + elif api_version == '2022-01-15-preview': + from ..v2022_01_15_preview import models + return models + elif api_version == '2022-03-01': + from ..v2022_03_01 import models + return models + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview import models + return models + elif api_version == '2022-07-01': + from ..v2022_07_01 import models + return models + elif api_version == '2022-11-01': + from ..v2022_11_01 import models + return models + elif api_version == '2023-05-01': + from ..v2023_05_01 import models + return models + raise ValueError("API version {} is not available".format(api_version)) + + @property + def cluster_extension_type(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`ClusterExtensionTypeOperations` + * 2021-11-01-preview: :class:`ClusterExtensionTypeOperations` + * 2022-01-01-preview: :class:`ClusterExtensionTypeOperations` + * 2022-01-15-preview: 
:class:`ClusterExtensionTypeOperations` + """ + api_version = self._get_api_version('cluster_extension_type') + if api_version == '2021-05-01-preview': + from ..v2021_05_01_preview.aio.operations import ClusterExtensionTypeOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import ClusterExtensionTypeOperations as OperationClass + elif api_version == '2022-01-01-preview': + from ..v2022_01_01_preview.aio.operations import ClusterExtensionTypeOperations as OperationClass + elif api_version == '2022-01-15-preview': + from ..v2022_01_15_preview.aio.operations import ClusterExtensionTypeOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'cluster_extension_type'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def cluster_extension_types(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`ClusterExtensionTypesOperations` + * 2021-11-01-preview: :class:`ClusterExtensionTypesOperations` + * 2022-01-01-preview: :class:`ClusterExtensionTypesOperations` + * 2022-01-15-preview: :class:`ClusterExtensionTypesOperations` + """ + api_version = self._get_api_version('cluster_extension_types') + if api_version == '2021-05-01-preview': + from ..v2021_05_01_preview.aio.operations import ClusterExtensionTypesOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import ClusterExtensionTypesOperations as OperationClass + elif api_version == '2022-01-01-preview': + from ..v2022_01_01_preview.aio.operations import ClusterExtensionTypesOperations as OperationClass + elif api_version == '2022-01-15-preview': + from ..v2022_01_15_preview.aio.operations import ClusterExtensionTypesOperations as 
OperationClass + else: + raise ValueError("API version {} does not have operation group 'cluster_extension_types'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def extension_type_versions(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`ExtensionTypeVersionsOperations` + * 2021-11-01-preview: :class:`ExtensionTypeVersionsOperations` + * 2022-01-01-preview: :class:`ExtensionTypeVersionsOperations` + * 2022-01-15-preview: :class:`ExtensionTypeVersionsOperations` + """ + api_version = self._get_api_version('extension_type_versions') + if api_version == '2021-05-01-preview': + from ..v2021_05_01_preview.aio.operations import ExtensionTypeVersionsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import ExtensionTypeVersionsOperations as OperationClass + elif api_version == '2022-01-01-preview': + from ..v2022_01_01_preview.aio.operations import ExtensionTypeVersionsOperations as OperationClass + elif api_version == '2022-01-15-preview': + from ..v2022_01_15_preview.aio.operations import ExtensionTypeVersionsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'extension_type_versions'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def extensions(self): + """Instance depends on the API version: + + * 2020-07-01-preview: :class:`ExtensionsOperations` + * 2021-05-01-preview: :class:`ExtensionsOperations` + * 2021-09-01: :class:`ExtensionsOperations` + * 2021-11-01-preview: :class:`ExtensionsOperations` + * 2022-01-01-preview: :class:`ExtensionsOperations` + 
* 2022-03-01: :class:`ExtensionsOperations` + * 2022-04-02-preview: :class:`ExtensionsOperations` + * 2022-07-01: :class:`ExtensionsOperations` + * 2022-11-01: :class:`ExtensionsOperations` + * 2023-05-01: :class:`ExtensionsOperations` + """ + api_version = self._get_api_version('extensions') + if api_version == '2020-07-01-preview': + from ..v2020_07_01_preview.aio.operations import ExtensionsOperations as OperationClass + elif api_version == '2021-05-01-preview': + from ..v2021_05_01_preview.aio.operations import ExtensionsOperations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import ExtensionsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-01-01-preview': + from ..v2022_01_01_preview.aio.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import ExtensionsOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import ExtensionsOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import ExtensionsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'extensions'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def flux_config_operation_status(self): + """Instance depends on the API version: + + * 2021-11-01-preview: 
:class:`FluxConfigOperationStatusOperations` + * 2022-01-01-preview: :class:`FluxConfigOperationStatusOperations` + * 2022-03-01: :class:`FluxConfigOperationStatusOperations` + * 2022-07-01: :class:`FluxConfigOperationStatusOperations` + * 2022-11-01: :class:`FluxConfigOperationStatusOperations` + * 2023-05-01: :class:`FluxConfigOperationStatusOperations` + """ + api_version = self._get_api_version('flux_config_operation_status') + if api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2022-01-01-preview': + from ..v2022_01_01_preview.aio.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import FluxConfigOperationStatusOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import FluxConfigOperationStatusOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'flux_config_operation_status'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def flux_configurations(self): + """Instance depends on the API version: + + * 2021-11-01-preview: :class:`FluxConfigurationsOperations` + * 2022-01-01-preview: :class:`FluxConfigurationsOperations` + * 2022-03-01: :class:`FluxConfigurationsOperations` + * 2022-07-01: :class:`FluxConfigurationsOperations` + * 2022-11-01: :class:`FluxConfigurationsOperations` + * 2023-05-01: 
:class:`FluxConfigurationsOperations` + """ + api_version = self._get_api_version('flux_configurations') + if api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2022-01-01-preview': + from ..v2022_01_01_preview.aio.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import FluxConfigurationsOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import FluxConfigurationsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'flux_configurations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def location_extension_types(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`LocationExtensionTypesOperations` + * 2021-11-01-preview: :class:`LocationExtensionTypesOperations` + * 2022-01-01-preview: :class:`LocationExtensionTypesOperations` + * 2022-01-15-preview: :class:`LocationExtensionTypesOperations` + """ + api_version = self._get_api_version('location_extension_types') + if api_version == '2021-05-01-preview': + from ..v2021_05_01_preview.aio.operations import LocationExtensionTypesOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import LocationExtensionTypesOperations as OperationClass + elif api_version == '2022-01-01-preview': + from 
..v2022_01_01_preview.aio.operations import LocationExtensionTypesOperations as OperationClass + elif api_version == '2022-01-15-preview': + from ..v2022_01_15_preview.aio.operations import LocationExtensionTypesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'location_extension_types'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def operation_status(self): + """Instance depends on the API version: + + * 2021-05-01-preview: :class:`OperationStatusOperations` + * 2021-09-01: :class:`OperationStatusOperations` + * 2021-11-01-preview: :class:`OperationStatusOperations` + * 2022-01-01-preview: :class:`OperationStatusOperations` + * 2022-03-01: :class:`OperationStatusOperations` + * 2022-04-02-preview: :class:`OperationStatusOperations` + * 2022-07-01: :class:`OperationStatusOperations` + * 2022-11-01: :class:`OperationStatusOperations` + * 2023-05-01: :class:`OperationStatusOperations` + """ + api_version = self._get_api_version('operation_status') + if api_version == '2021-05-01-preview': + from ..v2021_05_01_preview.aio.operations import OperationStatusOperations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import OperationStatusOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import OperationStatusOperations as OperationClass + elif api_version == '2022-01-01-preview': + from ..v2022_01_01_preview.aio.operations import OperationStatusOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import OperationStatusOperations as OperationClass + elif api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import OperationStatusOperations as 
OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import OperationStatusOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import OperationStatusOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import OperationStatusOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'operation_status'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def operations(self): + """Instance depends on the API version: + + * 2020-07-01-preview: :class:`Operations` + * 2020-10-01-preview: :class:`Operations` + * 2021-03-01: :class:`Operations` + * 2021-05-01-preview: :class:`Operations` + * 2021-09-01: :class:`Operations` + * 2021-11-01-preview: :class:`Operations` + * 2022-01-01-preview: :class:`Operations` + * 2022-03-01: :class:`Operations` + * 2022-07-01: :class:`Operations` + * 2022-11-01: :class:`Operations` + * 2023-05-01: :class:`Operations` + """ + api_version = self._get_api_version('operations') + if api_version == '2020-07-01-preview': + from ..v2020_07_01_preview.aio.operations import Operations as OperationClass + elif api_version == '2020-10-01-preview': + from ..v2020_10_01_preview.aio.operations import Operations as OperationClass + elif api_version == '2021-03-01': + from ..v2021_03_01.aio.operations import Operations as OperationClass + elif api_version == '2021-05-01-preview': + from ..v2021_05_01_preview.aio.operations import Operations as OperationClass + elif api_version == '2021-09-01': + from ..v2021_09_01.aio.operations import Operations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import Operations as OperationClass + elif api_version 
== '2022-01-01-preview': + from ..v2022_01_01_preview.aio.operations import Operations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import Operations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import Operations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import Operations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import Operations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'operations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_endpoint_connections(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`PrivateEndpointConnectionsOperations` + """ + api_version = self._get_api_version('private_endpoint_connections') + if api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_link_resources(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`PrivateLinkResourcesOperations` + """ + api_version = self._get_api_version('private_link_resources') + if api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 
'private_link_resources'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def private_link_scopes(self): + """Instance depends on the API version: + + * 2022-04-02-preview: :class:`PrivateLinkScopesOperations` + """ + api_version = self._get_api_version('private_link_scopes') + if api_version == '2022-04-02-preview': + from ..v2022_04_02_preview.aio.operations import PrivateLinkScopesOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'private_link_scopes'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + @property + def source_control_configurations(self): + """Instance depends on the API version: + + * 2020-07-01-preview: :class:`SourceControlConfigurationsOperations` + * 2020-10-01-preview: :class:`SourceControlConfigurationsOperations` + * 2021-03-01: :class:`SourceControlConfigurationsOperations` + * 2021-05-01-preview: :class:`SourceControlConfigurationsOperations` + * 2021-11-01-preview: :class:`SourceControlConfigurationsOperations` + * 2022-01-01-preview: :class:`SourceControlConfigurationsOperations` + * 2022-03-01: :class:`SourceControlConfigurationsOperations` + * 2022-07-01: :class:`SourceControlConfigurationsOperations` + * 2022-11-01: :class:`SourceControlConfigurationsOperations` + * 2023-05-01: :class:`SourceControlConfigurationsOperations` + """ + api_version = self._get_api_version('source_control_configurations') + if api_version == '2020-07-01-preview': + from ..v2020_07_01_preview.aio.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2020-10-01-preview': + from ..v2020_10_01_preview.aio.operations 
import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2021-03-01': + from ..v2021_03_01.aio.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2021-05-01-preview': + from ..v2021_05_01_preview.aio.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2021-11-01-preview': + from ..v2021_11_01_preview.aio.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2022-01-01-preview': + from ..v2022_01_01_preview.aio.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2022-03-01': + from ..v2022_03_01.aio.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2022-07-01': + from ..v2022_07_01.aio.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2022-11-01': + from ..v2022_11_01.aio.operations import SourceControlConfigurationsOperations as OperationClass + elif api_version == '2023-05-01': + from ..v2023_05_01.aio.operations import SourceControlConfigurationsOperations as OperationClass + else: + raise ValueError("API version {} does not have operation group 'source_control_configurations'".format(api_version)) + self._config.api_version = api_version + return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + + async def close(self): + await self._client.close() + async def __aenter__(self): + await self._client.__aenter__() + return self + async def __aexit__(self, *exc_details): + await self._client.__aexit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/models.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/models.py new file mode 100644 index 
00000000000..9979cbfa0aa --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/models.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from .v2022_01_15_preview.models import * +from .v2022_04_02_preview.models import * +from .v2023_05_01.models import * diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/py.typed b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/py.typed new file mode 100644 index 00000000000..e5aff4f83af --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/__init__.py new file mode 100644 index 00000000000..3ad4bd8b604 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/__init__.py @@ -0,0 +1,26 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._source_control_configuration_client import SourceControlConfigurationClient +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "SourceControlConfigurationClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_configuration.py new file mode 100644 index 00000000000..04ecbde73e8 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_configuration.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class SourceControlConfigurationClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes + """Configuration for SourceControlConfigurationClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + :keyword api_version: Api Version. Default value is "2023-05-01". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs: Any) -> None: + super(SourceControlConfigurationClientConfiguration, self).__init__(**kwargs) + api_version: str = kwargs.pop("api_version", "2023-05-01") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-kubernetesconfiguration/{}".format(VERSION)) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = ARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_patch.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_source_control_configuration_client.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_source_control_configuration_client.py new file mode 100644 index 00000000000..2436b32f0ce --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_source_control_configuration_client.py @@ -0,0 +1,128 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING + +from azure.core.rest import HttpRequest, HttpResponse +from azure.mgmt.core import ARMPipelineClient + +from . import models as _models +from .._serialization import Deserializer, Serializer +from ._configuration import SourceControlConfigurationClientConfiguration +from .operations import ( + ExtensionsOperations, + FluxConfigOperationStatusOperations, + FluxConfigurationsOperations, + OperationStatusOperations, + Operations, + SourceControlConfigurationsOperations, +) + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class SourceControlConfigurationClient: # pylint: disable=client-accepts-api-version-keyword + """KubernetesConfiguration Client. + + :ivar extensions: ExtensionsOperations operations + :vartype extensions: + azure.mgmt.kubernetesconfiguration.v2023_05_01.operations.ExtensionsOperations + :ivar operation_status: OperationStatusOperations operations + :vartype operation_status: + azure.mgmt.kubernetesconfiguration.v2023_05_01.operations.OperationStatusOperations + :ivar flux_configurations: FluxConfigurationsOperations operations + :vartype flux_configurations: + azure.mgmt.kubernetesconfiguration.v2023_05_01.operations.FluxConfigurationsOperations + :ivar flux_config_operation_status: FluxConfigOperationStatusOperations operations + :vartype flux_config_operation_status: + azure.mgmt.kubernetesconfiguration.v2023_05_01.operations.FluxConfigOperationStatusOperations + :ivar source_control_configurations: SourceControlConfigurationsOperations operations + :vartype source_control_configurations: + azure.mgmt.kubernetesconfiguration.v2023_05_01.operations.SourceControlConfigurationsOperations + :ivar operations: Operations operations + :vartype operations: azure.mgmt.kubernetesconfiguration.v2023_05_01.operations.Operations + 
:param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + :param base_url: Service URL. Default value is "https://management.azure.com". + :type base_url: str + :keyword api_version: Api Version. Default value is "2023-05-01". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + """ + + def __init__( + self, + credential: "TokenCredential", + subscription_id: str, + base_url: str = "https://management.azure.com", + **kwargs: Any + ) -> None: + self._config = SourceControlConfigurationClientConfiguration( + credential=credential, subscription_id=subscription_id, **kwargs + ) + self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.extensions = ExtensionsOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-05-01" + ) + self.operation_status = OperationStatusOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-05-01" + ) + self.flux_configurations = FluxConfigurationsOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-05-01" + ) + self.flux_config_operation_status = FluxConfigOperationStatusOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-05-01" + ) + self.source_control_configurations = SourceControlConfigurationsOperations( + self._client, self._config, self._serialize, self._deserialize, 
"2023-05-01" + ) + self.operations = Operations(self._client, self._config, self._serialize, self._deserialize, "2023-05-01") + + def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, **kwargs) + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "SourceControlConfigurationClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_vendor.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_vendor.py new file mode 100644 index 00000000000..0dafe0e287f --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_vendor.py @@ -0,0 +1,16 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.core.pipeline.transport import HttpRequest + + +def _convert_request(request, files=None): + data = request.content if not files else None + request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) + if files: + request.set_formdata_body(files) + return request diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_version.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_version.py new file mode 100644 index 00000000000..47babc28d5e --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +VERSION = "3.1.0" diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/__init__.py new file mode 100644 index 00000000000..b95230ae03c --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._source_control_configuration_client import SourceControlConfigurationClient + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "SourceControlConfigurationClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_configuration.py new file mode 100644 index 00000000000..fcf3f1ae427 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_configuration.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy + +from .._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class SourceControlConfigurationClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes + """Configuration for SourceControlConfigurationClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + :keyword api_version: Api Version. Default value is "2023-05-01". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None: + super(SourceControlConfigurationClientConfiguration, self).__init__(**kwargs) + api_version: str = kwargs.pop("api_version", "2023-05-01") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-kubernetesconfiguration/{}".format(VERSION)) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = AsyncARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_patch.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_source_control_configuration_client.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_source_control_configuration_client.py new file mode 100644 index 00000000000..404cad25c87 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/_source_control_configuration_client.py @@ -0,0 +1,128 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, TYPE_CHECKING + +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.mgmt.core import AsyncARMPipelineClient + +from .. import models as _models +from ..._serialization import Deserializer, Serializer +from ._configuration import SourceControlConfigurationClientConfiguration +from .operations import ( + ExtensionsOperations, + FluxConfigOperationStatusOperations, + FluxConfigurationsOperations, + OperationStatusOperations, + Operations, + SourceControlConfigurationsOperations, +) + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class SourceControlConfigurationClient: # pylint: disable=client-accepts-api-version-keyword + """KubernetesConfiguration Client. + + :ivar extensions: ExtensionsOperations operations + :vartype extensions: + azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.operations.ExtensionsOperations + :ivar operation_status: OperationStatusOperations operations + :vartype operation_status: + azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.operations.OperationStatusOperations + :ivar flux_configurations: FluxConfigurationsOperations operations + :vartype flux_configurations: + azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.operations.FluxConfigurationsOperations + :ivar flux_config_operation_status: FluxConfigOperationStatusOperations operations + :vartype flux_config_operation_status: + azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.operations.FluxConfigOperationStatusOperations + :ivar source_control_configurations: SourceControlConfigurationsOperations operations + :vartype source_control_configurations: + azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.operations.SourceControlConfigurationsOperations + :ivar operations: Operations operations + :vartype operations: 
azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.operations.Operations + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + :param base_url: Service URL. Default value is "https://management.azure.com". + :type base_url: str + :keyword api_version: Api Version. Default value is "2023-05-01". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + base_url: str = "https://management.azure.com", + **kwargs: Any + ) -> None: + self._config = SourceControlConfigurationClientConfiguration( + credential=credential, subscription_id=subscription_id, **kwargs + ) + self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.extensions = ExtensionsOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-05-01" + ) + self.operation_status = OperationStatusOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-05-01" + ) + self.flux_configurations = FluxConfigurationsOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-05-01" + ) + self.flux_config_operation_status = FluxConfigOperationStatusOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-05-01" + ) + self.source_control_configurations = 
SourceControlConfigurationsOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-05-01" + ) + self.operations = Operations(self._client, self._config, self._serialize, self._deserialize, "2023-05-01") + + def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, **kwargs) + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "SourceControlConfigurationClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/__init__.py new file mode 100644 index 00000000000..9d58b5443a0 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._extensions_operations import ExtensionsOperations +from ._operation_status_operations import OperationStatusOperations +from ._flux_configurations_operations import FluxConfigurationsOperations +from ._flux_config_operation_status_operations import FluxConfigOperationStatusOperations +from ._source_control_configurations_operations import SourceControlConfigurationsOperations +from ._operations import Operations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ExtensionsOperations", + "OperationStatusOperations", + "FluxConfigurationsOperations", + "FluxConfigOperationStatusOperations", + "SourceControlConfigurationsOperations", + "Operations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_extensions_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_extensions_operations.py new file mode 100644 index 00000000000..494585bd151 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_extensions_operations.py @@ -0,0 +1,947 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) 
Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._extensions_operations import ( + build_create_request, + build_delete_request, + build_get_request, + build_list_request, + build_update_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class ExtensionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.SourceControlConfigurationClient`'s + :attr:`extensions` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + async def _create_initial( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + extension: Union[_models.Extension, IO], + **kwargs: Any + ) -> _models.Extension: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(extension, (IOBase, bytes)): + _content = extension + else: + _json = self._serialize.body(extension, "Extension") + + request = build_create_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_initial.metadata["url"], + headers=_headers, + 
params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Extension", pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize("Extension", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @overload + async def begin_create( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + extension: _models.Extension, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Extension]: + """Create a new Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. 
managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param extension: Properties necessary to Create an Extension. Required. + :type extension: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Extension or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + extension: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Extension]: + """Create a new Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param extension: Properties necessary to Create an Extension. Required. + :type extension: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either Extension or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + extension: Union[_models.Extension, IO], + **kwargs: Any + ) -> AsyncLROPoller[_models.Extension]: + """Create a new Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param extension: Properties necessary to Create an Extension. Is either a Extension type or a + IO type. Required. + :type extension: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. 
Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Extension or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_initial( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + extension=extension, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Extension", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncARMPolling(lro_delay, 
lro_options={"final-state-via": "azure-async-operation"}, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @distributed_trace_async + async def get( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + **kwargs: Any + ) -> _models.Extension: + """Gets Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. 
+ :type extension_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Extension or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + + request = build_get_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Extension", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + async def _delete_initial( # pylint: disable=inconsistent-return-statements + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + force_delete: Optional[bool] = None, + **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_delete_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + subscription_id=self._config.subscription_id, + force_delete=force_delete, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if cls: + 
return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @distributed_trace_async + async def begin_delete( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + force_delete: Optional[bool] = None, + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Delete a Kubernetes Cluster Extension. This will cause the Agent to Uninstall the extension + from the cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param force_delete: Delete the extension resource in Azure - not the normal asynchronous + delete. Default value is None. + :type force_delete: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. 
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + force_delete=force_delete, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + 
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + async def _update_initial( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + patch_extension: Union[_models.PatchExtension, IO], + **kwargs: Any + ) -> _models.Extension: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(patch_extension, (IOBase, bytes)): + _content = patch_extension + else: + _json = self._serialize.body(patch_extension, "PatchExtension") + + request = build_update_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = 
False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Extension", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Extension", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @overload + async def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + patch_extension: _models.PatchExtension, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Extension]: + """Patch an existing Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. 
+ :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param patch_extension: Properties to Patch in an existing Extension. Required. + :type patch_extension: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.PatchExtension + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Extension or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + patch_extension: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Extension]: + """Patch an existing Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. 
Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param patch_extension: Properties to Patch in an existing Extension. Required. + :type patch_extension: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Extension or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + patch_extension: Union[_models.PatchExtension, IO], + **kwargs: Any + ) -> AsyncLROPoller[_models.Extension]: + """Patch an existing Kubernetes Cluster Extension. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param patch_extension: Properties to Patch in an existing Extension. Is either a + PatchExtension type or a IO type. Required. + :type patch_extension: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.PatchExtension or + IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either Extension or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_initial( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + patch_extension=patch_extension, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Extension", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + 
client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @distributed_trace + def list( + self, resource_group_name: str, cluster_rp: str, cluster_resource_name: str, cluster_name: str, **kwargs: Any + ) -> AsyncIterable["_models.Extension"]: + """List all Extensions in the cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. 
+ :type cluster_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Extension or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.ExtensionsList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_list_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def 
extract_data(pipeline_response): + deserialized = self._deserialize("ExtensionsList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions" + } diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_flux_config_operation_status_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_flux_config_operation_status_operations.py new file mode 100644 index 00000000000..54a0d7148f9 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_flux_config_operation_status_operations.py @@ -0,0 +1,138 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Optional, TypeVar + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._flux_config_operation_status_operations import build_get_request + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class FluxConfigOperationStatusOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.SourceControlConfigurationClient`'s + :attr:`flux_config_operation_status` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace_async + async def get( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + operation_id: str, + **kwargs: Any + ) -> _models.OperationStatusResult: + """Get Async Operation status. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param operation_id: operation Id. Required. 
+ :type operation_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: OperationStatusResult or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperationStatusResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.OperationStatusResult] = kwargs.pop("cls", None) + + request = build_get_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + operation_id=operation_id, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("OperationStatusResult", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + 
return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}/operations/{operationId}" + } diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_flux_configurations_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_flux_configurations_operations.py new file mode 100644 index 00000000000..f7edd199bb7 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_flux_configurations_operations.py @@ -0,0 +1,952 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._flux_configurations_operations import ( + build_create_or_update_request, + build_delete_request, + build_get_request, + build_list_request, + build_update_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class FluxConfigurationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.SourceControlConfigurationClient`'s + :attr:`flux_configurations` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace_async + async def get( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + **kwargs: Any + ) -> _models.FluxConfiguration: + """Gets details of the Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. 
+ :type flux_configuration_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FluxConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + + request = build_get_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata 
= { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + async def _create_or_update_initial( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration: Union[_models.FluxConfiguration, IO], + **kwargs: Any + ) -> _models.FluxConfiguration: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(flux_configuration, (IOBase, bytes)): + _content = flux_configuration + else: + _json = self._serialize.body(flux_configuration, "FluxConfiguration") + + request = build_create_or_update_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: 
PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration: _models.FluxConfiguration, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.FluxConfiguration]: + """Create a new Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. 
+ :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration: Properties necessary to Create a FluxConfiguration. Required. + :type flux_configuration: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.FluxConfiguration]: + """Create a new Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration: Properties necessary to Create a FluxConfiguration. Required. + :type flux_configuration: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration: Union[_models.FluxConfiguration, IO], + **kwargs: Any + ) -> AsyncLROPoller[_models.FluxConfiguration]: + """Create a new Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration: Properties necessary to Create a FluxConfiguration. Is either a + FluxConfiguration type or a IO type. Required. + :type flux_configuration: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + flux_configuration=flux_configuration, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return 
deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + async def _update_initial( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration_patch: Union[_models.FluxConfigurationPatch, IO], + **kwargs: Any + ) -> _models.FluxConfiguration: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(flux_configuration_patch, (IOBase, bytes)): + _content = flux_configuration_patch + 
else: + _json = self._serialize.body(flux_configuration_patch, "FluxConfigurationPatch") + + request = build_update_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + @overload + async def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration_patch: 
_models.FluxConfigurationPatch, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.FluxConfiguration]: + """Update an existing Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration_patch: Properties to Patch in an existing Flux Configuration. + Required. + :type flux_configuration_patch: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfigurationPatch + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration_patch: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.FluxConfiguration]: + """Update an existing Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration_patch: Properties to Patch in an existing Flux Configuration. + Required. + :type flux_configuration_patch: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. 
Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration_patch: Union[_models.FluxConfigurationPatch, IO], + **kwargs: Any + ) -> AsyncLROPoller[_models.FluxConfiguration]: + """Update an existing Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration_patch: Properties to Patch in an existing Flux Configuration. Is + either a FluxConfigurationPatch type or a IO type. Required. 
+ :type flux_configuration_patch: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfigurationPatch or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_initial( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + 
flux_configuration_name=flux_configuration_name, + flux_configuration_patch=flux_configuration_patch, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + async def _delete_initial( # pylint: disable=inconsistent-return-statements + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + force_delete: Optional[bool] = None, + **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_delete_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + subscription_id=self._config.subscription_id, + force_delete=force_delete, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + @distributed_trace_async + async def begin_delete( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + force_delete: Optional[bool] = None, + **kwargs: Any + ) -> AsyncLROPoller[None]: + """This will delete the YAML file used to set up the Flux Configuration, thus stopping future sync + from the source repo. + + :param resource_group_name: The name of the resource group. The name is case insensitive. 
+ Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param force_delete: Delete the extension resource in Azure - not the normal asynchronous + delete. Default value is None. + :type force_delete: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + force_delete=force_delete, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + @distributed_trace + def list( + self, resource_group_name: str, cluster_rp: str, cluster_resource_name: str, cluster_name: str, **kwargs: Any + ) -> AsyncIterable["_models.FluxConfiguration"]: + """List all Flux Configurations. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. 
+ :type cluster_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either FluxConfiguration or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.FluxConfigurationsList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_list_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return 
request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("FluxConfigurationsList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations" + } diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_operation_status_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_operation_status_operations.py new file mode 100644 index 00000000000..609e62b2e18 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/aio/operations/_operation_status_operations.py @@ -0,0 +1,243 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse

from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
    ClientAuthenticationError,
    HttpResponseError,
    ResourceExistsError,
    ResourceNotFoundError,
    ResourceNotModifiedError,
    map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat

from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operation_status_operations import build_get_request, build_list_request

T = TypeVar("T")
# Shape of the optional `cls` response-customization callback accepted by every operation:
# it receives the raw pipeline response, the deserialized model, and response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]


class OperationStatusOperations:
    """Async operations for reading the status of extension operations.

    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.SourceControlConfigurationClient`'s
        :attr:`operation_status` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The generated client wires in its pipeline, configuration and
        # (de)serializers either positionally or by keyword; accept both.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        cluster_rp: str,
        cluster_resource_name: str,
        cluster_name: str,
        extension_name: str,
        operation_id: str,
        **kwargs: Any
    ) -> _models.OperationStatusResult:
        """Get Async Operation status.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService,
         Microsoft.Kubernetes, Microsoft.HybridContainerService. Required.
        :type cluster_rp: str
        :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters,
         connectedClusters, provisionedClusters. Required.
        :type cluster_resource_name: str
        :param cluster_name: The name of the kubernetes cluster. Required.
        :type cluster_name: str
        :param extension_name: Name of the Extension. Required.
        :type extension_name: str
        :param operation_id: operation Id. Required.
        :type operation_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OperationStatusResult or the result of cls(response)
        :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperationStatusResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to azure-core exception types; callers
        # may extend or override the mapping via the `error_map` keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # api-version resolution order: explicit kwarg, query param,
        # client-level setting, then this module's hard-coded default.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01"))
        cls: ClsType[_models.OperationStatusResult] = kwargs.pop("cls", None)

        request = build_get_request(
            resource_group_name=resource_group_name,
            cluster_rp=cluster_rp,
            cluster_resource_name=cluster_resource_name,
            cluster_name=cluster_name,
            extension_name=extension_name,
            operation_id=operation_id,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # failsafe_deserialize tolerates malformed error bodies instead of raising.
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("OperationStatusResult", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}/operations/{operationId}"
    }

    @distributed_trace
    def list(
        self, resource_group_name: str, cluster_rp: str, cluster_resource_name: str, cluster_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.OperationStatusResult"]:
        """List Async Operations, currently in progress, in a cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService,
         Microsoft.Kubernetes, Microsoft.HybridContainerService. Required.
        :type cluster_rp: str
        :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters,
         connectedClusters, provisionedClusters. Required.
        :type cluster_resource_name: str
        :param cluster_name: The name of the kubernetes cluster. Required.
        :type cluster_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationStatusResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperationStatusResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01"))
        cls: ClsType[_models.OperationStatusList] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated list request. Subsequent pages:
            # follow the service-provided next_link verbatim (re-quoting its
            # query) but pin the client's api-version.
            if not next_link:

                request = build_list_request(
                    resource_group_name=resource_group_name,
                    cluster_rp=cluster_rp,
                    cluster_resource_name=cluster_resource_name,
                    cluster_name=cluster_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("OperationStatusList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/operations"
    }
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse

from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
    ClientAuthenticationError,
    HttpResponseError,
    ResourceExistsError,
    ResourceNotFoundError,
    ResourceNotModifiedError,
    map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat

from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request

T = TypeVar("T")
# Shape of the optional `cls` response-customization callback accepted by every operation.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]


class Operations:
    """Async operations for enumerating the resource provider's available operations.

    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.SourceControlConfigurationClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The generated client wires in its pipeline, configuration and
        # (de)serializers either positionally or by keyword; accept both.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def list(self, **kwargs: Any) -> AsyncIterable["_models.ResourceProviderOperation"]:
        """List all the available operations the KubernetesConfiguration resource provider supports.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceProviderOperation or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ResourceProviderOperation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # api-version resolution order: explicit kwarg, query param,
        # client-level setting, then this module's hard-coded default.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01"))
        cls: ClsType[_models.ResourceProviderOperationList] = kwargs.pop("cls", None)

        # Map well-known HTTP status codes to azure-core exception types; callers
        # may extend or override the mapping via the `error_map` keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: templated list request. Subsequent pages: follow the
            # service-provided next_link (re-quoting its query) but pin the
            # client's api-version.
            if not next_link:

                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("ResourceProviderOperationList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                # failsafe_deserialize tolerates malformed error bodies instead of raising.
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.KubernetesConfiguration/operations"}
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Hand-written customizations of the generated code live in this module.

Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
from typing import List

# Names listed here become part of the package's public surface; the
# generated package re-exports whatever this module declares.
__all__: List[str] = []


def patch_sdk():
    """Do not remove from this file.

    `patch_sdk` is a last resort escape hatch that allows you to do customizations
    you can't accomplish using the techniques described in
    https://aka.ms/azsdk/python/dpcodegen/python/customize
    """
+# -------------------------------------------------------------------------- +from io import IOBase +from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._source_control_configurations_operations import ( + build_create_or_update_request, + build_delete_request, + build_get_request, + build_list_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class SourceControlConfigurationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.aio.SourceControlConfigurationClient`'s + :attr:`source_control_configurations` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace_async + async def get( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + **kwargs: Any + ) -> _models.SourceControlConfiguration: + """Gets details of the Source Control Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. 
+ :type source_control_configuration_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SourceControlConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.SourceControlConfiguration] = kwargs.pop("cls", None) + + request = build_get_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + source_control_configuration_name=source_control_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("SourceControlConfiguration", pipeline_response) + + if cls: + return 
cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}" + } + + @overload + async def create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + source_control_configuration: _models.SourceControlConfiguration, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.SourceControlConfiguration: + """Create a new Kubernetes Source Control Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. + :type source_control_configuration_name: str + :param source_control_configuration: Properties necessary to Create KubernetesConfiguration. + Required. + :type source_control_configuration: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SourceControlConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + source_control_configuration: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.SourceControlConfiguration: + """Create a new Kubernetes Source Control Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. + :type source_control_configuration_name: str + :param source_control_configuration: Properties necessary to Create KubernetesConfiguration. + Required. + :type source_control_configuration: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SourceControlConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + source_control_configuration: Union[_models.SourceControlConfiguration, IO], + **kwargs: Any + ) -> _models.SourceControlConfiguration: + """Create a new Kubernetes Source Control Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. + :type source_control_configuration_name: str + :param source_control_configuration: Properties necessary to Create KubernetesConfiguration. Is + either a SourceControlConfiguration type or a IO type. Required. + :type source_control_configuration: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. 
+ :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SourceControlConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SourceControlConfiguration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(source_control_configuration, (IOBase, bytes)): + _content = source_control_configuration + else: + _json = self._serialize.body(source_control_configuration, "SourceControlConfiguration") + + request = build_create_or_update_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + source_control_configuration_name=source_control_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self.create_or_update.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, 
**kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("SourceControlConfiguration", pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize("SourceControlConfiguration", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}" + } + + async def _delete_initial( # pylint: disable=inconsistent-return-statements + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_delete_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + source_control_configuration_name=source_control_configuration_name, + 
subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}" + } + + @distributed_trace_async + async def begin_delete( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + **kwargs: Any + ) -> AsyncLROPoller[None]: + """This will delete the YAML file used to set up the Source control configuration, thus stopping + future sync from the source repo. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. 
+ :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. + :type source_control_configuration_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + source_control_configuration_name=source_control_configuration_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + 
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}" + } + + @distributed_trace + def list( + self, resource_group_name: str, cluster_rp: str, cluster_resource_name: str, cluster_name: str, **kwargs: Any + ) -> AsyncIterable["_models.SourceControlConfiguration"]: + """List all Source Control Configurations. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. 
+ :type cluster_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either SourceControlConfiguration or the result of + cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.SourceControlConfigurationList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_list_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + 
request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("SourceControlConfigurationList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations" + } diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/__init__.py new file mode 100644 index 00000000000..955dab7fdbc --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/__init__.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._models_py3 import AzureBlobDefinition +from ._models_py3 import AzureBlobPatchDefinition +from ._models_py3 import BucketDefinition +from ._models_py3 import BucketPatchDefinition +from ._models_py3 import ComplianceStatus +from ._models_py3 import ErrorAdditionalInfo +from ._models_py3 import ErrorDetail +from ._models_py3 import ErrorResponse +from ._models_py3 import Extension +from ._models_py3 import ExtensionPropertiesAksAssignedIdentity +from ._models_py3 import ExtensionStatus +from ._models_py3 import ExtensionsList +from ._models_py3 import FluxConfiguration +from ._models_py3 import FluxConfigurationPatch +from ._models_py3 import FluxConfigurationsList +from ._models_py3 import GitRepositoryDefinition +from ._models_py3 import GitRepositoryPatchDefinition +from ._models_py3 import HelmOperatorProperties +from ._models_py3 import HelmReleasePropertiesDefinition +from ._models_py3 import Identity +from ._models_py3 import KustomizationDefinition +from ._models_py3 import KustomizationPatchDefinition +from ._models_py3 import ManagedIdentityDefinition +from ._models_py3 import ManagedIdentityPatchDefinition +from ._models_py3 import ObjectReferenceDefinition +from ._models_py3 import ObjectStatusConditionDefinition +from ._models_py3 import ObjectStatusDefinition +from ._models_py3 import OperationStatusList +from ._models_py3 import OperationStatusResult +from ._models_py3 import PatchExtension +from ._models_py3 import Plan +from ._models_py3 import PostBuildDefinition +from ._models_py3 import ProxyResource +from ._models_py3 import RepositoryRefDefinition +from ._models_py3 import Resource +from ._models_py3 import ResourceProviderOperation +from ._models_py3 import ResourceProviderOperationDisplay +from ._models_py3 import 
ResourceProviderOperationList +from ._models_py3 import Scope +from ._models_py3 import ScopeCluster +from ._models_py3 import ScopeNamespace +from ._models_py3 import ServicePrincipalDefinition +from ._models_py3 import ServicePrincipalPatchDefinition +from ._models_py3 import SourceControlConfiguration +from ._models_py3 import SourceControlConfigurationList +from ._models_py3 import SubstituteFromDefinition +from ._models_py3 import SystemData + +from ._source_control_configuration_client_enums import AKSIdentityType +from ._source_control_configuration_client_enums import ComplianceStateType +from ._source_control_configuration_client_enums import CreatedByType +from ._source_control_configuration_client_enums import FluxComplianceState +from ._source_control_configuration_client_enums import KustomizationValidationType +from ._source_control_configuration_client_enums import LevelType +from ._source_control_configuration_client_enums import MessageLevelType +from ._source_control_configuration_client_enums import OperatorScopeType +from ._source_control_configuration_client_enums import OperatorType +from ._source_control_configuration_client_enums import ProvisioningState +from ._source_control_configuration_client_enums import ProvisioningStateType +from ._source_control_configuration_client_enums import ScopeType +from ._source_control_configuration_client_enums import SourceKindType +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AzureBlobDefinition", + "AzureBlobPatchDefinition", + "BucketDefinition", + "BucketPatchDefinition", + "ComplianceStatus", + "ErrorAdditionalInfo", + "ErrorDetail", + "ErrorResponse", + "Extension", + "ExtensionPropertiesAksAssignedIdentity", + "ExtensionStatus", + "ExtensionsList", + "FluxConfiguration", + "FluxConfigurationPatch", + "FluxConfigurationsList", + "GitRepositoryDefinition", + 
"GitRepositoryPatchDefinition", + "HelmOperatorProperties", + "HelmReleasePropertiesDefinition", + "Identity", + "KustomizationDefinition", + "KustomizationPatchDefinition", + "ManagedIdentityDefinition", + "ManagedIdentityPatchDefinition", + "ObjectReferenceDefinition", + "ObjectStatusConditionDefinition", + "ObjectStatusDefinition", + "OperationStatusList", + "OperationStatusResult", + "PatchExtension", + "Plan", + "PostBuildDefinition", + "ProxyResource", + "RepositoryRefDefinition", + "Resource", + "ResourceProviderOperation", + "ResourceProviderOperationDisplay", + "ResourceProviderOperationList", + "Scope", + "ScopeCluster", + "ScopeNamespace", + "ServicePrincipalDefinition", + "ServicePrincipalPatchDefinition", + "SourceControlConfiguration", + "SourceControlConfigurationList", + "SubstituteFromDefinition", + "SystemData", + "AKSIdentityType", + "ComplianceStateType", + "CreatedByType", + "FluxComplianceState", + "KustomizationValidationType", + "LevelType", + "MessageLevelType", + "OperatorScopeType", + "OperatorType", + "ProvisioningState", + "ProvisioningStateType", + "ScopeType", + "SourceKindType", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_models_py3.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_models_py3.py new file mode 100644 index 00000000000..2bc1090d26f --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_models_py3.py @@ -0,0 +1,2796 @@ +# coding=utf-8 +# pylint: disable=too-many-lines +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +import sys +from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union + +from ... import _serialization + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from .. import models as _models + + +class AzureBlobDefinition(_serialization.Model): + """Parameters to reconcile to the AzureBlob source kind type. + + :ivar url: The URL to sync for the flux configuration Azure Blob storage account. + :vartype url: str + :ivar container_name: The Azure Blob container name to sync from the url endpoint for the flux + configuration. + :vartype container_name: str + :ivar timeout_in_seconds: The maximum time to attempt to reconcile the cluster Azure Blob + source with the remote. + :vartype timeout_in_seconds: int + :ivar sync_interval_in_seconds: The interval at which to re-reconcile the cluster Azure Blob + source with the remote. + :vartype sync_interval_in_seconds: int + :ivar service_principal: Parameters to authenticate using Service Principal. + :vartype service_principal: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ServicePrincipalDefinition + :ivar account_key: The account key (shared key) to access the storage account. + :vartype account_key: str + :ivar sas_token: The Shared Access token to access the storage container. + :vartype sas_token: str + :ivar managed_identity: Parameters to authenticate using a Managed Identity. 
+ :vartype managed_identity: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ManagedIdentityDefinition + :ivar local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :vartype local_auth_ref: str + """ + + _attribute_map = { + "url": {"key": "url", "type": "str"}, + "container_name": {"key": "containerName", "type": "str"}, + "timeout_in_seconds": {"key": "timeoutInSeconds", "type": "int"}, + "sync_interval_in_seconds": {"key": "syncIntervalInSeconds", "type": "int"}, + "service_principal": {"key": "servicePrincipal", "type": "ServicePrincipalDefinition"}, + "account_key": {"key": "accountKey", "type": "str"}, + "sas_token": {"key": "sasToken", "type": "str"}, + "managed_identity": {"key": "managedIdentity", "type": "ManagedIdentityDefinition"}, + "local_auth_ref": {"key": "localAuthRef", "type": "str"}, + } + + def __init__( + self, + *, + url: Optional[str] = None, + container_name: Optional[str] = None, + timeout_in_seconds: int = 600, + sync_interval_in_seconds: int = 600, + service_principal: Optional["_models.ServicePrincipalDefinition"] = None, + account_key: Optional[str] = None, + sas_token: Optional[str] = None, + managed_identity: Optional["_models.ManagedIdentityDefinition"] = None, + local_auth_ref: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword url: The URL to sync for the flux configuration Azure Blob storage account. + :paramtype url: str + :keyword container_name: The Azure Blob container name to sync from the url endpoint for the + flux configuration. + :paramtype container_name: str + :keyword timeout_in_seconds: The maximum time to attempt to reconcile the cluster Azure Blob + source with the remote. + :paramtype timeout_in_seconds: int + :keyword sync_interval_in_seconds: The interval at which to re-reconcile the cluster Azure Blob + source with the remote. 
+ :paramtype sync_interval_in_seconds: int + :keyword service_principal: Parameters to authenticate using Service Principal. + :paramtype service_principal: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ServicePrincipalDefinition + :keyword account_key: The account key (shared key) to access the storage account. + :paramtype account_key: str + :keyword sas_token: The Shared Access token to access the storage container. + :paramtype sas_token: str + :keyword managed_identity: Parameters to authenticate using a Managed Identity. + :paramtype managed_identity: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ManagedIdentityDefinition + :keyword local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :paramtype local_auth_ref: str + """ + super().__init__(**kwargs) + self.url = url + self.container_name = container_name + self.timeout_in_seconds = timeout_in_seconds + self.sync_interval_in_seconds = sync_interval_in_seconds + self.service_principal = service_principal + self.account_key = account_key + self.sas_token = sas_token + self.managed_identity = managed_identity + self.local_auth_ref = local_auth_ref + + +class AzureBlobPatchDefinition(_serialization.Model): + """Parameters to reconcile to the AzureBlob source kind type. + + :ivar url: The URL to sync for the flux configuration Azure Blob storage account. + :vartype url: str + :ivar container_name: The Azure Blob container name to sync from the url endpoint for the flux + configuration. + :vartype container_name: str + :ivar timeout_in_seconds: The maximum time to attempt to reconcile the cluster Azure Blob + source with the remote. + :vartype timeout_in_seconds: int + :ivar sync_interval_in_seconds: The interval at which to re-reconcile the cluster Azure Blob + source with the remote. 
+ :vartype sync_interval_in_seconds: int + :ivar service_principal: Parameters to authenticate using Service Principal. + :vartype service_principal: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ServicePrincipalPatchDefinition + :ivar account_key: The account key (shared key) to access the storage account. + :vartype account_key: str + :ivar sas_token: The Shared Access token to access the storage container. + :vartype sas_token: str + :ivar managed_identity: Parameters to authenticate using a Managed Identity. + :vartype managed_identity: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ManagedIdentityPatchDefinition + :ivar local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :vartype local_auth_ref: str + """ + + _attribute_map = { + "url": {"key": "url", "type": "str"}, + "container_name": {"key": "containerName", "type": "str"}, + "timeout_in_seconds": {"key": "timeoutInSeconds", "type": "int"}, + "sync_interval_in_seconds": {"key": "syncIntervalInSeconds", "type": "int"}, + "service_principal": {"key": "servicePrincipal", "type": "ServicePrincipalPatchDefinition"}, + "account_key": {"key": "accountKey", "type": "str"}, + "sas_token": {"key": "sasToken", "type": "str"}, + "managed_identity": {"key": "managedIdentity", "type": "ManagedIdentityPatchDefinition"}, + "local_auth_ref": {"key": "localAuthRef", "type": "str"}, + } + + def __init__( + self, + *, + url: Optional[str] = None, + container_name: Optional[str] = None, + timeout_in_seconds: Optional[int] = None, + sync_interval_in_seconds: Optional[int] = None, + service_principal: Optional["_models.ServicePrincipalPatchDefinition"] = None, + account_key: Optional[str] = None, + sas_token: Optional[str] = None, + managed_identity: Optional["_models.ManagedIdentityPatchDefinition"] = None, + local_auth_ref: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword 
url: The URL to sync for the flux configuration Azure Blob storage account. + :paramtype url: str + :keyword container_name: The Azure Blob container name to sync from the url endpoint for the + flux configuration. + :paramtype container_name: str + :keyword timeout_in_seconds: The maximum time to attempt to reconcile the cluster Azure Blob + source with the remote. + :paramtype timeout_in_seconds: int + :keyword sync_interval_in_seconds: The interval at which to re-reconcile the cluster Azure Blob + source with the remote. + :paramtype sync_interval_in_seconds: int + :keyword service_principal: Parameters to authenticate using Service Principal. + :paramtype service_principal: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ServicePrincipalPatchDefinition + :keyword account_key: The account key (shared key) to access the storage account. + :paramtype account_key: str + :keyword sas_token: The Shared Access token to access the storage container. + :paramtype sas_token: str + :keyword managed_identity: Parameters to authenticate using a Managed Identity. + :paramtype managed_identity: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ManagedIdentityPatchDefinition + :keyword local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :paramtype local_auth_ref: str + """ + super().__init__(**kwargs) + self.url = url + self.container_name = container_name + self.timeout_in_seconds = timeout_in_seconds + self.sync_interval_in_seconds = sync_interval_in_seconds + self.service_principal = service_principal + self.account_key = account_key + self.sas_token = sas_token + self.managed_identity = managed_identity + self.local_auth_ref = local_auth_ref + + +class BucketDefinition(_serialization.Model): + """Parameters to reconcile to the Bucket source kind type. + + :ivar url: The URL to sync for the flux configuration S3 bucket. 
+ :vartype url: str + :ivar bucket_name: The bucket name to sync from the url endpoint for the flux configuration. + :vartype bucket_name: str + :ivar insecure: Specify whether to use insecure communication when puling data from the S3 + bucket. + :vartype insecure: bool + :ivar timeout_in_seconds: The maximum time to attempt to reconcile the cluster bucket source + with the remote. + :vartype timeout_in_seconds: int + :ivar sync_interval_in_seconds: The interval at which to re-reconcile the cluster bucket source + with the remote. + :vartype sync_interval_in_seconds: int + :ivar access_key: Plaintext access key used to securely access the S3 bucket. + :vartype access_key: str + :ivar local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :vartype local_auth_ref: str + """ + + _attribute_map = { + "url": {"key": "url", "type": "str"}, + "bucket_name": {"key": "bucketName", "type": "str"}, + "insecure": {"key": "insecure", "type": "bool"}, + "timeout_in_seconds": {"key": "timeoutInSeconds", "type": "int"}, + "sync_interval_in_seconds": {"key": "syncIntervalInSeconds", "type": "int"}, + "access_key": {"key": "accessKey", "type": "str"}, + "local_auth_ref": {"key": "localAuthRef", "type": "str"}, + } + + def __init__( + self, + *, + url: Optional[str] = None, + bucket_name: Optional[str] = None, + insecure: bool = True, + timeout_in_seconds: int = 600, + sync_interval_in_seconds: int = 600, + access_key: Optional[str] = None, + local_auth_ref: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword url: The URL to sync for the flux configuration S3 bucket. + :paramtype url: str + :keyword bucket_name: The bucket name to sync from the url endpoint for the flux configuration. + :paramtype bucket_name: str + :keyword insecure: Specify whether to use insecure communication when puling data from the S3 + bucket. 
+ :paramtype insecure: bool + :keyword timeout_in_seconds: The maximum time to attempt to reconcile the cluster bucket source + with the remote. + :paramtype timeout_in_seconds: int + :keyword sync_interval_in_seconds: The interval at which to re-reconcile the cluster bucket + source with the remote. + :paramtype sync_interval_in_seconds: int + :keyword access_key: Plaintext access key used to securely access the S3 bucket. + :paramtype access_key: str + :keyword local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :paramtype local_auth_ref: str + """ + super().__init__(**kwargs) + self.url = url + self.bucket_name = bucket_name + self.insecure = insecure + self.timeout_in_seconds = timeout_in_seconds + self.sync_interval_in_seconds = sync_interval_in_seconds + self.access_key = access_key + self.local_auth_ref = local_auth_ref + + +class BucketPatchDefinition(_serialization.Model): + """Parameters to reconcile to the Bucket source kind type. + + :ivar url: The URL to sync for the flux configuration S3 bucket. + :vartype url: str + :ivar bucket_name: The bucket name to sync from the url endpoint for the flux configuration. + :vartype bucket_name: str + :ivar insecure: Specify whether to use insecure communication when puling data from the S3 + bucket. + :vartype insecure: bool + :ivar timeout_in_seconds: The maximum time to attempt to reconcile the cluster bucket source + with the remote. + :vartype timeout_in_seconds: int + :ivar sync_interval_in_seconds: The interval at which to re-reconcile the cluster bucket source + with the remote. + :vartype sync_interval_in_seconds: int + :ivar access_key: Plaintext access key used to securely access the S3 bucket. 
+ :vartype access_key: str + :ivar local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :vartype local_auth_ref: str + """ + + _attribute_map = { + "url": {"key": "url", "type": "str"}, + "bucket_name": {"key": "bucketName", "type": "str"}, + "insecure": {"key": "insecure", "type": "bool"}, + "timeout_in_seconds": {"key": "timeoutInSeconds", "type": "int"}, + "sync_interval_in_seconds": {"key": "syncIntervalInSeconds", "type": "int"}, + "access_key": {"key": "accessKey", "type": "str"}, + "local_auth_ref": {"key": "localAuthRef", "type": "str"}, + } + + def __init__( + self, + *, + url: Optional[str] = None, + bucket_name: Optional[str] = None, + insecure: Optional[bool] = None, + timeout_in_seconds: Optional[int] = None, + sync_interval_in_seconds: Optional[int] = None, + access_key: Optional[str] = None, + local_auth_ref: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword url: The URL to sync for the flux configuration S3 bucket. + :paramtype url: str + :keyword bucket_name: The bucket name to sync from the url endpoint for the flux configuration. + :paramtype bucket_name: str + :keyword insecure: Specify whether to use insecure communication when puling data from the S3 + bucket. + :paramtype insecure: bool + :keyword timeout_in_seconds: The maximum time to attempt to reconcile the cluster bucket source + with the remote. + :paramtype timeout_in_seconds: int + :keyword sync_interval_in_seconds: The interval at which to re-reconcile the cluster bucket + source with the remote. + :paramtype sync_interval_in_seconds: int + :keyword access_key: Plaintext access key used to securely access the S3 bucket. + :paramtype access_key: str + :keyword local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. 
+ :paramtype local_auth_ref: str + """ + super().__init__(**kwargs) + self.url = url + self.bucket_name = bucket_name + self.insecure = insecure + self.timeout_in_seconds = timeout_in_seconds + self.sync_interval_in_seconds = sync_interval_in_seconds + self.access_key = access_key + self.local_auth_ref = local_auth_ref + + +class ComplianceStatus(_serialization.Model): + """Compliance Status details. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar compliance_state: The compliance state of the configuration. Known values are: "Pending", + "Compliant", "Noncompliant", "Installed", and "Failed". + :vartype compliance_state: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ComplianceStateType + :ivar last_config_applied: Datetime the configuration was last applied. + :vartype last_config_applied: ~datetime.datetime + :ivar message: Message from when the configuration was applied. + :vartype message: str + :ivar message_level: Level of the message. Known values are: "Error", "Warning", and + "Information". + :vartype message_level: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.MessageLevelType + """ + + _validation = { + "compliance_state": {"readonly": True}, + } + + _attribute_map = { + "compliance_state": {"key": "complianceState", "type": "str"}, + "last_config_applied": {"key": "lastConfigApplied", "type": "iso-8601"}, + "message": {"key": "message", "type": "str"}, + "message_level": {"key": "messageLevel", "type": "str"}, + } + + def __init__( + self, + *, + last_config_applied: Optional[datetime.datetime] = None, + message: Optional[str] = None, + message_level: Optional[Union[str, "_models.MessageLevelType"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword last_config_applied: Datetime the configuration was last applied. + :paramtype last_config_applied: ~datetime.datetime + :keyword message: Message from when the configuration was applied. 
+ :paramtype message: str + :keyword message_level: Level of the message. Known values are: "Error", "Warning", and + "Information". + :paramtype message_level: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.MessageLevelType + """ + super().__init__(**kwargs) + self.compliance_state = None + self.last_config_applied = last_config_applied + self.message = message + self.message_level = message_level + + +class ErrorAdditionalInfo(_serialization.Model): + """The resource management error additional info. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar type: The additional info type. + :vartype type: str + :ivar info: The additional info. + :vartype info: JSON + """ + + _validation = { + "type": {"readonly": True}, + "info": {"readonly": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "info": {"key": "info", "type": "object"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.type = None + self.info = None + + +class ErrorDetail(_serialization.Model): + """The error detail. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar code: The error code. + :vartype code: str + :ivar message: The error message. + :vartype message: str + :ivar target: The error target. + :vartype target: str + :ivar details: The error details. + :vartype details: list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ErrorDetail] + :ivar additional_info: The error additional info. 
+ :vartype additional_info: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ErrorAdditionalInfo] + """ + + _validation = { + "code": {"readonly": True}, + "message": {"readonly": True}, + "target": {"readonly": True}, + "details": {"readonly": True}, + "additional_info": {"readonly": True}, + } + + _attribute_map = { + "code": {"key": "code", "type": "str"}, + "message": {"key": "message", "type": "str"}, + "target": {"key": "target", "type": "str"}, + "details": {"key": "details", "type": "[ErrorDetail]"}, + "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.code = None + self.message = None + self.target = None + self.details = None + self.additional_info = None + + +class ErrorResponse(_serialization.Model): + """Common error response for all Azure Resource Manager APIs to return error details for failed + operations. (This also follows the OData error response format.). + + :ivar error: The error object. + :vartype error: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ErrorDetail + """ + + _attribute_map = { + "error": {"key": "error", "type": "ErrorDetail"}, + } + + def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None: + """ + :keyword error: The error object. + :paramtype error: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ErrorDetail + """ + super().__init__(**kwargs) + self.error = error + + +class Resource(_serialization.Model): + """Common fields that are returned in the response for all Azure Resource Manager resources. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. 
+ :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + + +class ProxyResource(Resource): + """The resource model definition for a Azure Resource Manager proxy resource. It will not have + tags and a location. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + + +class Extension(ProxyResource): # pylint: disable=too-many-instance-attributes + """The Extension object. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. 
Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar identity: Identity of the Extension resource. + :vartype identity: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Identity + :ivar system_data: Top level metadata + https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources. + :vartype system_data: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SystemData + :ivar plan: The plan information. + :vartype plan: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Plan + :ivar extension_type: Type of the Extension, of which this resource is an instance of. It must + be one of the Extension Types registered with Microsoft.KubernetesConfiguration by the + Extension publisher. + :vartype extension_type: str + :ivar auto_upgrade_minor_version: Flag to note if this extension participates in auto upgrade + of minor version, or not. + :vartype auto_upgrade_minor_version: bool + :ivar release_train: ReleaseTrain this extension participates in for auto-upgrade (e.g. Stable, + Preview, etc.) - only if autoUpgradeMinorVersion is 'true'. + :vartype release_train: str + :ivar version: User-specified version of the extension for this extension to 'pin'. To use + 'version', autoUpgradeMinorVersion must be 'false'. + :vartype version: str + :ivar scope: Scope at which the extension is installed. + :vartype scope: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Scope + :ivar configuration_settings: Configuration settings, as name-value pairs for configuring this + extension. 
+ :vartype configuration_settings: dict[str, str] + :ivar configuration_protected_settings: Configuration settings that are sensitive, as + name-value pairs for configuring this extension. + :vartype configuration_protected_settings: dict[str, str] + :ivar current_version: Currently installed version of the extension. + :vartype current_version: str + :ivar provisioning_state: Status of installation of this extension. Known values are: + "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting". + :vartype provisioning_state: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ProvisioningState + :ivar statuses: Status from this extension. + :vartype statuses: list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ExtensionStatus] + :ivar error_info: Error information from the Agent - e.g. errors during installation. + :vartype error_info: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ErrorDetail + :ivar custom_location_settings: Custom Location settings properties. + :vartype custom_location_settings: dict[str, str] + :ivar package_uri: Uri of the Helm package. + :vartype package_uri: str + :ivar aks_assigned_identity: Identity of the Extension resource in an AKS cluster. + :vartype aks_assigned_identity: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ExtensionPropertiesAksAssignedIdentity + :ivar is_system_extension: Flag to note if this extension is a system extension. 
+ :vartype is_system_extension: bool + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + "current_version": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "error_info": {"readonly": True}, + "custom_location_settings": {"readonly": True}, + "package_uri": {"readonly": True}, + "is_system_extension": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "identity": {"key": "identity", "type": "Identity"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "plan": {"key": "plan", "type": "Plan"}, + "extension_type": {"key": "properties.extensionType", "type": "str"}, + "auto_upgrade_minor_version": {"key": "properties.autoUpgradeMinorVersion", "type": "bool"}, + "release_train": {"key": "properties.releaseTrain", "type": "str"}, + "version": {"key": "properties.version", "type": "str"}, + "scope": {"key": "properties.scope", "type": "Scope"}, + "configuration_settings": {"key": "properties.configurationSettings", "type": "{str}"}, + "configuration_protected_settings": {"key": "properties.configurationProtectedSettings", "type": "{str}"}, + "current_version": {"key": "properties.currentVersion", "type": "str"}, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "statuses": {"key": "properties.statuses", "type": "[ExtensionStatus]"}, + "error_info": {"key": "properties.errorInfo", "type": "ErrorDetail"}, + "custom_location_settings": {"key": "properties.customLocationSettings", "type": "{str}"}, + "package_uri": {"key": "properties.packageUri", "type": "str"}, + "aks_assigned_identity": { + "key": "properties.aksAssignedIdentity", + "type": "ExtensionPropertiesAksAssignedIdentity", + }, + "is_system_extension": {"key": "properties.isSystemExtension", "type": "bool"}, + } + + def __init__( + 
self, + *, + identity: Optional["_models.Identity"] = None, + plan: Optional["_models.Plan"] = None, + extension_type: Optional[str] = None, + auto_upgrade_minor_version: bool = True, + release_train: str = "Stable", + version: Optional[str] = None, + scope: Optional["_models.Scope"] = None, + configuration_settings: Optional[Dict[str, str]] = None, + configuration_protected_settings: Optional[Dict[str, str]] = None, + statuses: Optional[List["_models.ExtensionStatus"]] = None, + aks_assigned_identity: Optional["_models.ExtensionPropertiesAksAssignedIdentity"] = None, + **kwargs: Any + ) -> None: + """ + :keyword identity: Identity of the Extension resource. + :paramtype identity: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Identity + :keyword plan: The plan information. + :paramtype plan: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Plan + :keyword extension_type: Type of the Extension, of which this resource is an instance of. It + must be one of the Extension Types registered with Microsoft.KubernetesConfiguration by the + Extension publisher. + :paramtype extension_type: str + :keyword auto_upgrade_minor_version: Flag to note if this extension participates in auto + upgrade of minor version, or not. + :paramtype auto_upgrade_minor_version: bool + :keyword release_train: ReleaseTrain this extension participates in for auto-upgrade (e.g. + Stable, Preview, etc.) - only if autoUpgradeMinorVersion is 'true'. + :paramtype release_train: str + :keyword version: User-specified version of the extension for this extension to 'pin'. To use + 'version', autoUpgradeMinorVersion must be 'false'. + :paramtype version: str + :keyword scope: Scope at which the extension is installed. + :paramtype scope: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Scope + :keyword configuration_settings: Configuration settings, as name-value pairs for configuring + this extension. 
+ :paramtype configuration_settings: dict[str, str] + :keyword configuration_protected_settings: Configuration settings that are sensitive, as + name-value pairs for configuring this extension. + :paramtype configuration_protected_settings: dict[str, str] + :keyword statuses: Status from this extension. + :paramtype statuses: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ExtensionStatus] + :keyword aks_assigned_identity: Identity of the Extension resource in an AKS cluster. + :paramtype aks_assigned_identity: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ExtensionPropertiesAksAssignedIdentity + """ + super().__init__(**kwargs) + self.identity = identity + self.system_data = None + self.plan = plan + self.extension_type = extension_type + self.auto_upgrade_minor_version = auto_upgrade_minor_version + self.release_train = release_train + self.version = version + self.scope = scope + self.configuration_settings = configuration_settings + self.configuration_protected_settings = configuration_protected_settings + self.current_version = None + self.provisioning_state = None + self.statuses = statuses + self.error_info = None + self.custom_location_settings = None + self.package_uri = None + self.aks_assigned_identity = aks_assigned_identity + self.is_system_extension = None + + +class ExtensionPropertiesAksAssignedIdentity(_serialization.Model): + """Identity of the Extension resource in an AKS cluster. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar principal_id: The principal ID of resource identity. + :vartype principal_id: str + :ivar tenant_id: The tenant ID of resource. + :vartype tenant_id: str + :ivar type: The identity type. Known values are: "SystemAssigned" and "UserAssigned". 
+ :vartype type: str or ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.AKSIdentityType + """ + + _validation = { + "principal_id": {"readonly": True}, + "tenant_id": {"readonly": True}, + } + + _attribute_map = { + "principal_id": {"key": "principalId", "type": "str"}, + "tenant_id": {"key": "tenantId", "type": "str"}, + "type": {"key": "type", "type": "str"}, + } + + def __init__(self, *, type: Optional[Union[str, "_models.AKSIdentityType"]] = None, **kwargs: Any) -> None: + """ + :keyword type: The identity type. Known values are: "SystemAssigned" and "UserAssigned". + :paramtype type: str or ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.AKSIdentityType + """ + super().__init__(**kwargs) + self.principal_id = None + self.tenant_id = None + self.type = type + + +class ExtensionsList(_serialization.Model): + """Result of the request to list Extensions. It contains a list of Extension objects and a URL + link to get the next set of results. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: List of Extensions within a Kubernetes cluster. + :vartype value: list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :ivar next_link: URL to get the next set of extension objects, if any. + :vartype next_link: str + """ + + _validation = { + "value": {"readonly": True}, + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[Extension]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.value = None + self.next_link = None + + +class ExtensionStatus(_serialization.Model): + """Status from the extension. + + :ivar code: Status code provided by the Extension. + :vartype code: str + :ivar display_status: Short description of status of the extension. + :vartype display_status: str + :ivar level: Level of the status. 
Known values are: "Error", "Warning", and "Information". + :vartype level: str or ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.LevelType + :ivar message: Detailed message of the status from the Extension. + :vartype message: str + :ivar time: DateLiteral (per ISO8601) noting the time of installation status. + :vartype time: str + """ + + _attribute_map = { + "code": {"key": "code", "type": "str"}, + "display_status": {"key": "displayStatus", "type": "str"}, + "level": {"key": "level", "type": "str"}, + "message": {"key": "message", "type": "str"}, + "time": {"key": "time", "type": "str"}, + } + + def __init__( + self, + *, + code: Optional[str] = None, + display_status: Optional[str] = None, + level: Union[str, "_models.LevelType"] = "Information", + message: Optional[str] = None, + time: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword code: Status code provided by the Extension. + :paramtype code: str + :keyword display_status: Short description of status of the extension. + :paramtype display_status: str + :keyword level: Level of the status. Known values are: "Error", "Warning", and "Information". + :paramtype level: str or ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.LevelType + :keyword message: Detailed message of the status from the Extension. + :paramtype message: str + :keyword time: DateLiteral (per ISO8601) noting the time of installation status. + :paramtype time: str + """ + super().__init__(**kwargs) + self.code = code + self.display_status = display_status + self.level = level + self.message = message + self.time = time + + +class FluxConfiguration(ProxyResource): # pylint: disable=too-many-instance-attributes + """The Flux Configuration object returned in Get & Put response. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. 
Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Top level metadata + https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources. + :vartype system_data: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SystemData + :ivar scope: Scope at which the operator will be installed. Known values are: "cluster" and + "namespace". + :vartype scope: str or ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ScopeType + :ivar namespace: The namespace to which this configuration is installed to. Maximum of 253 + lower case alphanumeric characters, hyphen and period only. + :vartype namespace: str + :ivar source_kind: Source Kind to pull the configuration data from. Known values are: + "GitRepository", "Bucket", and "AzureBlob". + :vartype source_kind: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceKindType + :ivar suspend: Whether this configuration should suspend its reconciliation of its + kustomizations and sources. + :vartype suspend: bool + :ivar git_repository: Parameters to reconcile to the GitRepository source kind type. + :vartype git_repository: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.GitRepositoryDefinition + :ivar bucket: Parameters to reconcile to the Bucket source kind type. + :vartype bucket: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.BucketDefinition + :ivar azure_blob: Parameters to reconcile to the AzureBlob source kind type. 
+ :vartype azure_blob: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.AzureBlobDefinition + :ivar kustomizations: Array of kustomizations used to reconcile the artifact pulled by the + source type on the cluster. + :vartype kustomizations: dict[str, + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.KustomizationDefinition] + :ivar configuration_protected_settings: Key-value pairs of protected configuration settings for + the configuration. + :vartype configuration_protected_settings: dict[str, str] + :ivar statuses: Statuses of the Flux Kubernetes resources created by the fluxConfiguration or + created by the managed objects provisioned by the fluxConfiguration. + :vartype statuses: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ObjectStatusDefinition] + :ivar repository_public_key: Public Key associated with this fluxConfiguration (either + generated within the cluster or provided by the user). + :vartype repository_public_key: str + :ivar source_synced_commit_id: Branch and/or SHA of the source commit synced with the cluster. + :vartype source_synced_commit_id: str + :ivar source_updated_at: Datetime the fluxConfiguration synced its source on the cluster. + :vartype source_updated_at: ~datetime.datetime + :ivar status_updated_at: Datetime the fluxConfiguration synced its status on the cluster with + Azure. + :vartype status_updated_at: ~datetime.datetime + :ivar wait_for_reconciliation: Whether flux configuration deployment should wait for cluster to + reconcile the kustomizations. + :vartype wait_for_reconciliation: bool + :ivar reconciliation_wait_duration: Maximum duration to wait for flux configuration + reconciliation. E.g PT1H, PT5M, P1D. + :vartype reconciliation_wait_duration: str + :ivar compliance_state: Combined status of the Flux Kubernetes resources created by the + fluxConfiguration or created by the managed objects. Known values are: "Compliant", + "Non-Compliant", "Pending", "Suspended", and "Unknown". 
+ :vartype compliance_state: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxComplianceState + :ivar provisioning_state: Status of the creation of the fluxConfiguration. Known values are: + "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting". + :vartype provisioning_state: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ProvisioningState + :ivar error_message: Error message returned to the user in the case of provisioning failure. + :vartype error_message: str + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + "statuses": {"readonly": True}, + "repository_public_key": {"readonly": True}, + "source_synced_commit_id": {"readonly": True}, + "source_updated_at": {"readonly": True}, + "status_updated_at": {"readonly": True}, + "compliance_state": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "error_message": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "scope": {"key": "properties.scope", "type": "str"}, + "namespace": {"key": "properties.namespace", "type": "str"}, + "source_kind": {"key": "properties.sourceKind", "type": "str"}, + "suspend": {"key": "properties.suspend", "type": "bool"}, + "git_repository": {"key": "properties.gitRepository", "type": "GitRepositoryDefinition"}, + "bucket": {"key": "properties.bucket", "type": "BucketDefinition"}, + "azure_blob": {"key": "properties.azureBlob", "type": "AzureBlobDefinition"}, + "kustomizations": {"key": "properties.kustomizations", "type": "{KustomizationDefinition}"}, + "configuration_protected_settings": {"key": "properties.configurationProtectedSettings", "type": "{str}"}, + "statuses": {"key": "properties.statuses", "type": "[ObjectStatusDefinition]"}, + 
"repository_public_key": {"key": "properties.repositoryPublicKey", "type": "str"}, + "source_synced_commit_id": {"key": "properties.sourceSyncedCommitId", "type": "str"}, + "source_updated_at": {"key": "properties.sourceUpdatedAt", "type": "iso-8601"}, + "status_updated_at": {"key": "properties.statusUpdatedAt", "type": "iso-8601"}, + "wait_for_reconciliation": {"key": "properties.waitForReconciliation", "type": "bool"}, + "reconciliation_wait_duration": {"key": "properties.reconciliationWaitDuration", "type": "str"}, + "compliance_state": {"key": "properties.complianceState", "type": "str"}, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "error_message": {"key": "properties.errorMessage", "type": "str"}, + } + + def __init__( + self, + *, + scope: Union[str, "_models.ScopeType"] = "cluster", + namespace: str = "default", + source_kind: Optional[Union[str, "_models.SourceKindType"]] = None, + suspend: bool = False, + git_repository: Optional["_models.GitRepositoryDefinition"] = None, + bucket: Optional["_models.BucketDefinition"] = None, + azure_blob: Optional["_models.AzureBlobDefinition"] = None, + kustomizations: Optional[Dict[str, "_models.KustomizationDefinition"]] = None, + configuration_protected_settings: Optional[Dict[str, str]] = None, + wait_for_reconciliation: Optional[bool] = None, + reconciliation_wait_duration: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword scope: Scope at which the operator will be installed. Known values are: "cluster" and + "namespace". + :paramtype scope: str or ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ScopeType + :keyword namespace: The namespace to which this configuration is installed to. Maximum of 253 + lower case alphanumeric characters, hyphen and period only. + :paramtype namespace: str + :keyword source_kind: Source Kind to pull the configuration data from. Known values are: + "GitRepository", "Bucket", and "AzureBlob". 
+ :paramtype source_kind: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceKindType + :keyword suspend: Whether this configuration should suspend its reconciliation of its + kustomizations and sources. + :paramtype suspend: bool + :keyword git_repository: Parameters to reconcile to the GitRepository source kind type. + :paramtype git_repository: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.GitRepositoryDefinition + :keyword bucket: Parameters to reconcile to the Bucket source kind type. + :paramtype bucket: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.BucketDefinition + :keyword azure_blob: Parameters to reconcile to the AzureBlob source kind type. + :paramtype azure_blob: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.AzureBlobDefinition + :keyword kustomizations: Array of kustomizations used to reconcile the artifact pulled by the + source type on the cluster. + :paramtype kustomizations: dict[str, + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.KustomizationDefinition] + :keyword configuration_protected_settings: Key-value pairs of protected configuration settings + for the configuration. + :paramtype configuration_protected_settings: dict[str, str] + :keyword wait_for_reconciliation: Whether flux configuration deployment should wait for cluster + to reconcile the kustomizations. + :paramtype wait_for_reconciliation: bool + :keyword reconciliation_wait_duration: Maximum duration to wait for flux configuration + reconciliation. E.g PT1H, PT5M, P1D. 
+ :paramtype reconciliation_wait_duration: str + """ + super().__init__(**kwargs) + self.system_data = None + self.scope = scope + self.namespace = namespace + self.source_kind = source_kind + self.suspend = suspend + self.git_repository = git_repository + self.bucket = bucket + self.azure_blob = azure_blob + self.kustomizations = kustomizations + self.configuration_protected_settings = configuration_protected_settings + self.statuses = None + self.repository_public_key = None + self.source_synced_commit_id = None + self.source_updated_at = None + self.status_updated_at = None + self.wait_for_reconciliation = wait_for_reconciliation + self.reconciliation_wait_duration = reconciliation_wait_duration + self.compliance_state = None + self.provisioning_state = None + self.error_message = None + + +class FluxConfigurationPatch(_serialization.Model): + """The Flux Configuration Patch Request object. + + :ivar source_kind: Source Kind to pull the configuration data from. Known values are: + "GitRepository", "Bucket", and "AzureBlob". + :vartype source_kind: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceKindType + :ivar suspend: Whether this configuration should suspend its reconciliation of its + kustomizations and sources. + :vartype suspend: bool + :ivar git_repository: Parameters to reconcile to the GitRepository source kind type. + :vartype git_repository: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.GitRepositoryPatchDefinition + :ivar bucket: Parameters to reconcile to the Bucket source kind type. + :vartype bucket: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.BucketPatchDefinition + :ivar azure_blob: Parameters to reconcile to the AzureBlob source kind type. + :vartype azure_blob: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.AzureBlobPatchDefinition + :ivar kustomizations: Array of kustomizations used to reconcile the artifact pulled by the + source type on the cluster. 
+ :vartype kustomizations: dict[str, + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.KustomizationPatchDefinition] + :ivar configuration_protected_settings: Key-value pairs of protected configuration settings for + the configuration. + :vartype configuration_protected_settings: dict[str, str] + """ + + _attribute_map = { + "source_kind": {"key": "properties.sourceKind", "type": "str"}, + "suspend": {"key": "properties.suspend", "type": "bool"}, + "git_repository": {"key": "properties.gitRepository", "type": "GitRepositoryPatchDefinition"}, + "bucket": {"key": "properties.bucket", "type": "BucketPatchDefinition"}, + "azure_blob": {"key": "properties.azureBlob", "type": "AzureBlobPatchDefinition"}, + "kustomizations": {"key": "properties.kustomizations", "type": "{KustomizationPatchDefinition}"}, + "configuration_protected_settings": {"key": "properties.configurationProtectedSettings", "type": "{str}"}, + } + + def __init__( + self, + *, + source_kind: Optional[Union[str, "_models.SourceKindType"]] = None, + suspend: Optional[bool] = None, + git_repository: Optional["_models.GitRepositoryPatchDefinition"] = None, + bucket: Optional["_models.BucketPatchDefinition"] = None, + azure_blob: Optional["_models.AzureBlobPatchDefinition"] = None, + kustomizations: Optional[Dict[str, "_models.KustomizationPatchDefinition"]] = None, + configuration_protected_settings: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> None: + """ + :keyword source_kind: Source Kind to pull the configuration data from. Known values are: + "GitRepository", "Bucket", and "AzureBlob". + :paramtype source_kind: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceKindType + :keyword suspend: Whether this configuration should suspend its reconciliation of its + kustomizations and sources. + :paramtype suspend: bool + :keyword git_repository: Parameters to reconcile to the GitRepository source kind type. 
+ :paramtype git_repository: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.GitRepositoryPatchDefinition + :keyword bucket: Parameters to reconcile to the Bucket source kind type. + :paramtype bucket: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.BucketPatchDefinition + :keyword azure_blob: Parameters to reconcile to the AzureBlob source kind type. + :paramtype azure_blob: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.AzureBlobPatchDefinition + :keyword kustomizations: Array of kustomizations used to reconcile the artifact pulled by the + source type on the cluster. + :paramtype kustomizations: dict[str, + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.KustomizationPatchDefinition] + :keyword configuration_protected_settings: Key-value pairs of protected configuration settings + for the configuration. + :paramtype configuration_protected_settings: dict[str, str] + """ + super().__init__(**kwargs) + self.source_kind = source_kind + self.suspend = suspend + self.git_repository = git_repository + self.bucket = bucket + self.azure_blob = azure_blob + self.kustomizations = kustomizations + self.configuration_protected_settings = configuration_protected_settings + + +class FluxConfigurationsList(_serialization.Model): + """Result of the request to list Flux Configurations. It contains a list of FluxConfiguration + objects and a URL link to get the next set of results. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: List of Flux Configurations within a Kubernetes cluster. + :vartype value: list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :ivar next_link: URL to get the next set of configuration objects, if any. 
+ :vartype next_link: str + """ + + _validation = { + "value": {"readonly": True}, + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[FluxConfiguration]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.value = None + self.next_link = None + + +class GitRepositoryDefinition(_serialization.Model): + """Parameters to reconcile to the GitRepository source kind type. + + :ivar url: The URL to sync for the flux configuration git repository. + :vartype url: str + :ivar timeout_in_seconds: The maximum time to attempt to reconcile the cluster git repository + source with the remote. + :vartype timeout_in_seconds: int + :ivar sync_interval_in_seconds: The interval at which to re-reconcile the cluster git + repository source with the remote. + :vartype sync_interval_in_seconds: int + :ivar repository_ref: The source reference for the GitRepository object. + :vartype repository_ref: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.RepositoryRefDefinition + :ivar ssh_known_hosts: Base64-encoded known_hosts value containing public SSH keys required to + access private git repositories over SSH. + :vartype ssh_known_hosts: str + :ivar https_user: Plaintext HTTPS username used to access private git repositories over HTTPS. + :vartype https_user: str + :ivar https_ca_cert: Base64-encoded HTTPS certificate authority contents used to access git + private git repositories over HTTPS. + :vartype https_ca_cert: str + :ivar local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. 
+ :vartype local_auth_ref: str + """ + + _attribute_map = { + "url": {"key": "url", "type": "str"}, + "timeout_in_seconds": {"key": "timeoutInSeconds", "type": "int"}, + "sync_interval_in_seconds": {"key": "syncIntervalInSeconds", "type": "int"}, + "repository_ref": {"key": "repositoryRef", "type": "RepositoryRefDefinition"}, + "ssh_known_hosts": {"key": "sshKnownHosts", "type": "str"}, + "https_user": {"key": "httpsUser", "type": "str"}, + "https_ca_cert": {"key": "httpsCACert", "type": "str"}, + "local_auth_ref": {"key": "localAuthRef", "type": "str"}, + } + + def __init__( + self, + *, + url: Optional[str] = None, + timeout_in_seconds: int = 600, + sync_interval_in_seconds: int = 600, + repository_ref: Optional["_models.RepositoryRefDefinition"] = None, + ssh_known_hosts: Optional[str] = None, + https_user: Optional[str] = None, + https_ca_cert: Optional[str] = None, + local_auth_ref: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword url: The URL to sync for the flux configuration git repository. + :paramtype url: str + :keyword timeout_in_seconds: The maximum time to attempt to reconcile the cluster git + repository source with the remote. + :paramtype timeout_in_seconds: int + :keyword sync_interval_in_seconds: The interval at which to re-reconcile the cluster git + repository source with the remote. + :paramtype sync_interval_in_seconds: int + :keyword repository_ref: The source reference for the GitRepository object. + :paramtype repository_ref: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.RepositoryRefDefinition + :keyword ssh_known_hosts: Base64-encoded known_hosts value containing public SSH keys required + to access private git repositories over SSH. + :paramtype ssh_known_hosts: str + :keyword https_user: Plaintext HTTPS username used to access private git repositories over + HTTPS. 
+ :paramtype https_user: str + :keyword https_ca_cert: Base64-encoded HTTPS certificate authority contents used to access git + private git repositories over HTTPS. + :paramtype https_ca_cert: str + :keyword local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :paramtype local_auth_ref: str + """ + super().__init__(**kwargs) + self.url = url + self.timeout_in_seconds = timeout_in_seconds + self.sync_interval_in_seconds = sync_interval_in_seconds + self.repository_ref = repository_ref + self.ssh_known_hosts = ssh_known_hosts + self.https_user = https_user + self.https_ca_cert = https_ca_cert + self.local_auth_ref = local_auth_ref + + +class GitRepositoryPatchDefinition(_serialization.Model): + """Parameters to reconcile to the GitRepository source kind type. + + :ivar url: The URL to sync for the flux configuration git repository. + :vartype url: str + :ivar timeout_in_seconds: The maximum time to attempt to reconcile the cluster git repository + source with the remote. + :vartype timeout_in_seconds: int + :ivar sync_interval_in_seconds: The interval at which to re-reconcile the cluster git + repository source with the remote. + :vartype sync_interval_in_seconds: int + :ivar repository_ref: The source reference for the GitRepository object. + :vartype repository_ref: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.RepositoryRefDefinition + :ivar ssh_known_hosts: Base64-encoded known_hosts value containing public SSH keys required to + access private git repositories over SSH. + :vartype ssh_known_hosts: str + :ivar https_user: Plaintext HTTPS username used to access private git repositories over HTTPS. + :vartype https_user: str + :ivar https_ca_cert: Base64-encoded HTTPS certificate authority contents used to access git + private git repositories over HTTPS. 
+ :vartype https_ca_cert: str + :ivar local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :vartype local_auth_ref: str + """ + + _attribute_map = { + "url": {"key": "url", "type": "str"}, + "timeout_in_seconds": {"key": "timeoutInSeconds", "type": "int"}, + "sync_interval_in_seconds": {"key": "syncIntervalInSeconds", "type": "int"}, + "repository_ref": {"key": "repositoryRef", "type": "RepositoryRefDefinition"}, + "ssh_known_hosts": {"key": "sshKnownHosts", "type": "str"}, + "https_user": {"key": "httpsUser", "type": "str"}, + "https_ca_cert": {"key": "httpsCACert", "type": "str"}, + "local_auth_ref": {"key": "localAuthRef", "type": "str"}, + } + + def __init__( + self, + *, + url: Optional[str] = None, + timeout_in_seconds: Optional[int] = None, + sync_interval_in_seconds: Optional[int] = None, + repository_ref: Optional["_models.RepositoryRefDefinition"] = None, + ssh_known_hosts: Optional[str] = None, + https_user: Optional[str] = None, + https_ca_cert: Optional[str] = None, + local_auth_ref: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword url: The URL to sync for the flux configuration git repository. + :paramtype url: str + :keyword timeout_in_seconds: The maximum time to attempt to reconcile the cluster git + repository source with the remote. + :paramtype timeout_in_seconds: int + :keyword sync_interval_in_seconds: The interval at which to re-reconcile the cluster git + repository source with the remote. + :paramtype sync_interval_in_seconds: int + :keyword repository_ref: The source reference for the GitRepository object. + :paramtype repository_ref: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.RepositoryRefDefinition + :keyword ssh_known_hosts: Base64-encoded known_hosts value containing public SSH keys required + to access private git repositories over SSH. 
+ :paramtype ssh_known_hosts: str + :keyword https_user: Plaintext HTTPS username used to access private git repositories over + HTTPS. + :paramtype https_user: str + :keyword https_ca_cert: Base64-encoded HTTPS certificate authority contents used to access git + private git repositories over HTTPS. + :paramtype https_ca_cert: str + :keyword local_auth_ref: Name of a local secret on the Kubernetes cluster to use as the + authentication secret rather than the managed or user-provided configuration secrets. + :paramtype local_auth_ref: str + """ + super().__init__(**kwargs) + self.url = url + self.timeout_in_seconds = timeout_in_seconds + self.sync_interval_in_seconds = sync_interval_in_seconds + self.repository_ref = repository_ref + self.ssh_known_hosts = ssh_known_hosts + self.https_user = https_user + self.https_ca_cert = https_ca_cert + self.local_auth_ref = local_auth_ref + + +class HelmOperatorProperties(_serialization.Model): + """Properties for Helm operator. + + :ivar chart_version: Version of the operator Helm chart. + :vartype chart_version: str + :ivar chart_values: Values override for the operator Helm chart. + :vartype chart_values: str + """ + + _attribute_map = { + "chart_version": {"key": "chartVersion", "type": "str"}, + "chart_values": {"key": "chartValues", "type": "str"}, + } + + def __init__( + self, *, chart_version: Optional[str] = None, chart_values: Optional[str] = None, **kwargs: Any + ) -> None: + """ + :keyword chart_version: Version of the operator Helm chart. + :paramtype chart_version: str + :keyword chart_values: Values override for the operator Helm chart. + :paramtype chart_values: str + """ + super().__init__(**kwargs) + self.chart_version = chart_version + self.chart_values = chart_values + + +class HelmReleasePropertiesDefinition(_serialization.Model): + """Properties for HelmRelease objects. + + :ivar last_revision_applied: The revision number of the last released object change. 
+ :vartype last_revision_applied: int + :ivar helm_chart_ref: The reference to the HelmChart object used as the source to this + HelmRelease. + :vartype helm_chart_ref: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ObjectReferenceDefinition + :ivar failure_count: Total number of times that the HelmRelease failed to install or upgrade. + :vartype failure_count: int + :ivar install_failure_count: Number of times that the HelmRelease failed to install. + :vartype install_failure_count: int + :ivar upgrade_failure_count: Number of times that the HelmRelease failed to upgrade. + :vartype upgrade_failure_count: int + """ + + _attribute_map = { + "last_revision_applied": {"key": "lastRevisionApplied", "type": "int"}, + "helm_chart_ref": {"key": "helmChartRef", "type": "ObjectReferenceDefinition"}, + "failure_count": {"key": "failureCount", "type": "int"}, + "install_failure_count": {"key": "installFailureCount", "type": "int"}, + "upgrade_failure_count": {"key": "upgradeFailureCount", "type": "int"}, + } + + def __init__( + self, + *, + last_revision_applied: Optional[int] = None, + helm_chart_ref: Optional["_models.ObjectReferenceDefinition"] = None, + failure_count: Optional[int] = None, + install_failure_count: Optional[int] = None, + upgrade_failure_count: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword last_revision_applied: The revision number of the last released object change. + :paramtype last_revision_applied: int + :keyword helm_chart_ref: The reference to the HelmChart object used as the source to this + HelmRelease. + :paramtype helm_chart_ref: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ObjectReferenceDefinition + :keyword failure_count: Total number of times that the HelmRelease failed to install or + upgrade. + :paramtype failure_count: int + :keyword install_failure_count: Number of times that the HelmRelease failed to install. 
+ :paramtype install_failure_count: int + :keyword upgrade_failure_count: Number of times that the HelmRelease failed to upgrade. + :paramtype upgrade_failure_count: int + """ + super().__init__(**kwargs) + self.last_revision_applied = last_revision_applied + self.helm_chart_ref = helm_chart_ref + self.failure_count = failure_count + self.install_failure_count = install_failure_count + self.upgrade_failure_count = upgrade_failure_count + + +class Identity(_serialization.Model): + """Identity for the resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar principal_id: The principal ID of resource identity. + :vartype principal_id: str + :ivar tenant_id: The tenant ID of resource. + :vartype tenant_id: str + :ivar type: The identity type. Default value is "SystemAssigned". + :vartype type: str + """ + + _validation = { + "principal_id": {"readonly": True}, + "tenant_id": {"readonly": True}, + } + + _attribute_map = { + "principal_id": {"key": "principalId", "type": "str"}, + "tenant_id": {"key": "tenantId", "type": "str"}, + "type": {"key": "type", "type": "str"}, + } + + def __init__(self, *, type: Optional[Literal["SystemAssigned"]] = None, **kwargs: Any) -> None: + """ + :keyword type: The identity type. Default value is "SystemAssigned". + :paramtype type: str + """ + super().__init__(**kwargs) + self.principal_id = None + self.tenant_id = None + self.type = type + + +class KustomizationDefinition(_serialization.Model): + """The Kustomization defining how to reconcile the artifact pulled by the source type on the + cluster. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Name of the Kustomization, matching the key in the Kustomizations object map. + :vartype name: str + :ivar path: The path in the source reference to reconcile on the cluster. + :vartype path: str + :ivar depends_on: Specifies other Kustomizations that this Kustomization depends on. 
This + Kustomization will not reconcile until all dependencies have completed their reconciliation. + :vartype depends_on: list[str] + :ivar timeout_in_seconds: The maximum time to attempt to reconcile the Kustomization on the + cluster. + :vartype timeout_in_seconds: int + :ivar sync_interval_in_seconds: The interval at which to re-reconcile the Kustomization on the + cluster. + :vartype sync_interval_in_seconds: int + :ivar retry_interval_in_seconds: The interval at which to re-reconcile the Kustomization on the + cluster in the event of failure on reconciliation. + :vartype retry_interval_in_seconds: int + :ivar prune: Enable/disable garbage collections of Kubernetes objects created by this + Kustomization. + :vartype prune: bool + :ivar force: Enable/disable re-creating Kubernetes resources on the cluster when patching fails + due to an immutable field change. + :vartype force: bool + :ivar wait: Enable/disable health check for all Kubernetes objects created by this + Kustomization. + :vartype wait: bool + :ivar post_build: Used for variable substitution for this Kustomization after kustomize build. 
+ :vartype post_build: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.PostBuildDefinition + """ + + _validation = { + "name": {"readonly": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "path": {"key": "path", "type": "str"}, + "depends_on": {"key": "dependsOn", "type": "[str]"}, + "timeout_in_seconds": {"key": "timeoutInSeconds", "type": "int"}, + "sync_interval_in_seconds": {"key": "syncIntervalInSeconds", "type": "int"}, + "retry_interval_in_seconds": {"key": "retryIntervalInSeconds", "type": "int"}, + "prune": {"key": "prune", "type": "bool"}, + "force": {"key": "force", "type": "bool"}, + "wait": {"key": "wait", "type": "bool"}, + "post_build": {"key": "postBuild", "type": "PostBuildDefinition"}, + } + + def __init__( + self, + *, + path: str = "", + depends_on: Optional[List[str]] = None, + timeout_in_seconds: int = 600, + sync_interval_in_seconds: int = 600, + retry_interval_in_seconds: Optional[int] = None, + prune: bool = False, + force: bool = False, + wait: bool = True, + post_build: Optional["_models.PostBuildDefinition"] = None, + **kwargs: Any + ) -> None: + """ + :keyword path: The path in the source reference to reconcile on the cluster. + :paramtype path: str + :keyword depends_on: Specifies other Kustomizations that this Kustomization depends on. This + Kustomization will not reconcile until all dependencies have completed their reconciliation. + :paramtype depends_on: list[str] + :keyword timeout_in_seconds: The maximum time to attempt to reconcile the Kustomization on the + cluster. + :paramtype timeout_in_seconds: int + :keyword sync_interval_in_seconds: The interval at which to re-reconcile the Kustomization on + the cluster. + :paramtype sync_interval_in_seconds: int + :keyword retry_interval_in_seconds: The interval at which to re-reconcile the Kustomization on + the cluster in the event of failure on reconciliation. 
+ :paramtype retry_interval_in_seconds: int + :keyword prune: Enable/disable garbage collections of Kubernetes objects created by this + Kustomization. + :paramtype prune: bool + :keyword force: Enable/disable re-creating Kubernetes resources on the cluster when patching + fails due to an immutable field change. + :paramtype force: bool + :keyword wait: Enable/disable health check for all Kubernetes objects created by this + Kustomization. + :paramtype wait: bool + :keyword post_build: Used for variable substitution for this Kustomization after kustomize + build. + :paramtype post_build: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.PostBuildDefinition + """ + super().__init__(**kwargs) + self.name = None + self.path = path + self.depends_on = depends_on + self.timeout_in_seconds = timeout_in_seconds + self.sync_interval_in_seconds = sync_interval_in_seconds + self.retry_interval_in_seconds = retry_interval_in_seconds + self.prune = prune + self.force = force + self.wait = wait + self.post_build = post_build + + +class KustomizationPatchDefinition(_serialization.Model): + """The Kustomization defining how to reconcile the artifact pulled by the source type on the + cluster. + + :ivar path: The path in the source reference to reconcile on the cluster. + :vartype path: str + :ivar depends_on: Specifies other Kustomizations that this Kustomization depends on. This + Kustomization will not reconcile until all dependencies have completed their reconciliation. + :vartype depends_on: list[str] + :ivar timeout_in_seconds: The maximum time to attempt to reconcile the Kustomization on the + cluster. + :vartype timeout_in_seconds: int + :ivar sync_interval_in_seconds: The interval at which to re-reconcile the Kustomization on the + cluster. + :vartype sync_interval_in_seconds: int + :ivar retry_interval_in_seconds: The interval at which to re-reconcile the Kustomization on the + cluster in the event of failure on reconciliation. 
+ :vartype retry_interval_in_seconds: int + :ivar prune: Enable/disable garbage collections of Kubernetes objects created by this + Kustomization. + :vartype prune: bool + :ivar force: Enable/disable re-creating Kubernetes resources on the cluster when patching fails + due to an immutable field change. + :vartype force: bool + :ivar wait: Enable/disable health check for all Kubernetes objects created by this + Kustomization. + :vartype wait: bool + :ivar post_build: Used for variable substitution for this Kustomization after kustomize build. + :vartype post_build: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.PostBuildDefinition + """ + + _attribute_map = { + "path": {"key": "path", "type": "str"}, + "depends_on": {"key": "dependsOn", "type": "[str]"}, + "timeout_in_seconds": {"key": "timeoutInSeconds", "type": "int"}, + "sync_interval_in_seconds": {"key": "syncIntervalInSeconds", "type": "int"}, + "retry_interval_in_seconds": {"key": "retryIntervalInSeconds", "type": "int"}, + "prune": {"key": "prune", "type": "bool"}, + "force": {"key": "force", "type": "bool"}, + "wait": {"key": "wait", "type": "bool"}, + "post_build": {"key": "postBuild", "type": "PostBuildDefinition"}, + } + + def __init__( + self, + *, + path: Optional[str] = None, + depends_on: Optional[List[str]] = None, + timeout_in_seconds: Optional[int] = None, + sync_interval_in_seconds: Optional[int] = None, + retry_interval_in_seconds: Optional[int] = None, + prune: Optional[bool] = None, + force: Optional[bool] = None, + wait: Optional[bool] = None, + post_build: Optional["_models.PostBuildDefinition"] = None, + **kwargs: Any + ) -> None: + """ + :keyword path: The path in the source reference to reconcile on the cluster. + :paramtype path: str + :keyword depends_on: Specifies other Kustomizations that this Kustomization depends on. This + Kustomization will not reconcile until all dependencies have completed their reconciliation. 
+ :paramtype depends_on: list[str] + :keyword timeout_in_seconds: The maximum time to attempt to reconcile the Kustomization on the + cluster. + :paramtype timeout_in_seconds: int + :keyword sync_interval_in_seconds: The interval at which to re-reconcile the Kustomization on + the cluster. + :paramtype sync_interval_in_seconds: int + :keyword retry_interval_in_seconds: The interval at which to re-reconcile the Kustomization on + the cluster in the event of failure on reconciliation. + :paramtype retry_interval_in_seconds: int + :keyword prune: Enable/disable garbage collections of Kubernetes objects created by this + Kustomization. + :paramtype prune: bool + :keyword force: Enable/disable re-creating Kubernetes resources on the cluster when patching + fails due to an immutable field change. + :paramtype force: bool + :keyword wait: Enable/disable health check for all Kubernetes objects created by this + Kustomization. + :paramtype wait: bool + :keyword post_build: Used for variable substitution for this Kustomization after kustomize + build. + :paramtype post_build: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.PostBuildDefinition + """ + super().__init__(**kwargs) + self.path = path + self.depends_on = depends_on + self.timeout_in_seconds = timeout_in_seconds + self.sync_interval_in_seconds = sync_interval_in_seconds + self.retry_interval_in_seconds = retry_interval_in_seconds + self.prune = prune + self.force = force + self.wait = wait + self.post_build = post_build + + +class ManagedIdentityDefinition(_serialization.Model): + """Parameters to authenticate using a Managed Identity. + + :ivar client_id: The client Id for authenticating a Managed Identity. + :vartype client_id: str + """ + + _attribute_map = { + "client_id": {"key": "clientId", "type": "str"}, + } + + def __init__(self, *, client_id: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword client_id: The client Id for authenticating a Managed Identity. 
+ :paramtype client_id: str + """ + super().__init__(**kwargs) + self.client_id = client_id + + +class ManagedIdentityPatchDefinition(_serialization.Model): + """Parameters to authenticate using a Managed Identity. + + :ivar client_id: The client Id for authenticating a Managed Identity. + :vartype client_id: str + """ + + _attribute_map = { + "client_id": {"key": "clientId", "type": "str"}, + } + + def __init__(self, *, client_id: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword client_id: The client Id for authenticating a Managed Identity. + :paramtype client_id: str + """ + super().__init__(**kwargs) + self.client_id = client_id + + +class ObjectReferenceDefinition(_serialization.Model): + """Object reference to a Kubernetes object on a cluster. + + :ivar name: Name of the object. + :vartype name: str + :ivar namespace: Namespace of the object. + :vartype namespace: str + """ + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "namespace": {"key": "namespace", "type": "str"}, + } + + def __init__(self, *, name: Optional[str] = None, namespace: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword name: Name of the object. + :paramtype name: str + :keyword namespace: Namespace of the object. + :paramtype namespace: str + """ + super().__init__(**kwargs) + self.name = name + self.namespace = namespace + + +class ObjectStatusConditionDefinition(_serialization.Model): + """Status condition of Kubernetes object. + + :ivar last_transition_time: Last time this status condition has changed. + :vartype last_transition_time: ~datetime.datetime + :ivar message: A more verbose description of the object status condition. + :vartype message: str + :ivar reason: Reason for the specified status condition type status. + :vartype reason: str + :ivar status: Status of the Kubernetes object condition type. + :vartype status: str + :ivar type: Object status condition type for this object. 
+ :vartype type: str + """ + + _attribute_map = { + "last_transition_time": {"key": "lastTransitionTime", "type": "iso-8601"}, + "message": {"key": "message", "type": "str"}, + "reason": {"key": "reason", "type": "str"}, + "status": {"key": "status", "type": "str"}, + "type": {"key": "type", "type": "str"}, + } + + def __init__( + self, + *, + last_transition_time: Optional[datetime.datetime] = None, + message: Optional[str] = None, + reason: Optional[str] = None, + status: Optional[str] = None, + type: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword last_transition_time: Last time this status condition has changed. + :paramtype last_transition_time: ~datetime.datetime + :keyword message: A more verbose description of the object status condition. + :paramtype message: str + :keyword reason: Reason for the specified status condition type status. + :paramtype reason: str + :keyword status: Status of the Kubernetes object condition type. + :paramtype status: str + :keyword type: Object status condition type for this object. + :paramtype type: str + """ + super().__init__(**kwargs) + self.last_transition_time = last_transition_time + self.message = message + self.reason = reason + self.status = status + self.type = type + + +class ObjectStatusDefinition(_serialization.Model): + """Statuses of objects deployed by the user-specified kustomizations from the git repository. + + :ivar name: Name of the applied object. + :vartype name: str + :ivar namespace: Namespace of the applied object. + :vartype namespace: str + :ivar kind: Kind of the applied object. + :vartype kind: str + :ivar compliance_state: Compliance state of the applied object showing whether the applied + object has come into a ready state on the cluster. Known values are: "Compliant", + "Non-Compliant", "Pending", "Suspended", and "Unknown". 
+ :vartype compliance_state: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxComplianceState + :ivar applied_by: Object reference to the Kustomization that applied this object. + :vartype applied_by: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ObjectReferenceDefinition + :ivar status_conditions: List of Kubernetes object status conditions present on the cluster. + :vartype status_conditions: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ObjectStatusConditionDefinition] + :ivar helm_release_properties: Additional properties that are provided from objects of the + HelmRelease kind. + :vartype helm_release_properties: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.HelmReleasePropertiesDefinition + """ + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "namespace": {"key": "namespace", "type": "str"}, + "kind": {"key": "kind", "type": "str"}, + "compliance_state": {"key": "complianceState", "type": "str"}, + "applied_by": {"key": "appliedBy", "type": "ObjectReferenceDefinition"}, + "status_conditions": {"key": "statusConditions", "type": "[ObjectStatusConditionDefinition]"}, + "helm_release_properties": {"key": "helmReleaseProperties", "type": "HelmReleasePropertiesDefinition"}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + namespace: Optional[str] = None, + kind: Optional[str] = None, + compliance_state: Union[str, "_models.FluxComplianceState"] = "Unknown", + applied_by: Optional["_models.ObjectReferenceDefinition"] = None, + status_conditions: Optional[List["_models.ObjectStatusConditionDefinition"]] = None, + helm_release_properties: Optional["_models.HelmReleasePropertiesDefinition"] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: Name of the applied object. + :paramtype name: str + :keyword namespace: Namespace of the applied object. + :paramtype namespace: str + :keyword kind: Kind of the applied object. 
+ :paramtype kind: str + :keyword compliance_state: Compliance state of the applied object showing whether the applied + object has come into a ready state on the cluster. Known values are: "Compliant", + "Non-Compliant", "Pending", "Suspended", and "Unknown". + :paramtype compliance_state: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxComplianceState + :keyword applied_by: Object reference to the Kustomization that applied this object. + :paramtype applied_by: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ObjectReferenceDefinition + :keyword status_conditions: List of Kubernetes object status conditions present on the cluster. + :paramtype status_conditions: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ObjectStatusConditionDefinition] + :keyword helm_release_properties: Additional properties that are provided from objects of the + HelmRelease kind. + :paramtype helm_release_properties: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.HelmReleasePropertiesDefinition + """ + super().__init__(**kwargs) + self.name = name + self.namespace = namespace + self.kind = kind + self.compliance_state = compliance_state + self.applied_by = applied_by + self.status_conditions = status_conditions + self.helm_release_properties = helm_release_properties + + +class OperationStatusList(_serialization.Model): + """The async operations in progress, in the cluster. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: List of async operations in progress, in the cluster. + :vartype value: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperationStatusResult] + :ivar next_link: URL to get the next set of Operation Result objects, if any. 
+ :vartype next_link: str + """ + + _validation = { + "value": {"readonly": True}, + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[OperationStatusResult]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.value = None + self.next_link = None + + +class OperationStatusResult(_serialization.Model): + """The current status of an async operation. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Fully qualified ID for the async operation. + :vartype id: str + :ivar name: Name of the async operation. + :vartype name: str + :ivar status: Operation status. Required. + :vartype status: str + :ivar properties: Additional information, if available. + :vartype properties: dict[str, str] + :ivar error: If present, details of the operation error. + :vartype error: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ErrorDetail + """ + + _validation = { + "status": {"required": True}, + "error": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "status": {"key": "status", "type": "str"}, + "properties": {"key": "properties", "type": "{str}"}, + "error": {"key": "error", "type": "ErrorDetail"}, + } + + def __init__( + self, + *, + status: str, + id: Optional[str] = None, # pylint: disable=redefined-builtin + name: Optional[str] = None, + properties: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: Fully qualified ID for the async operation. + :paramtype id: str + :keyword name: Name of the async operation. + :paramtype name: str + :keyword status: Operation status. Required. + :paramtype status: str + :keyword properties: Additional information, if available. 
+ :paramtype properties: dict[str, str] + """ + super().__init__(**kwargs) + self.id = id + self.name = name + self.status = status + self.properties = properties + self.error = None + + +class PatchExtension(_serialization.Model): + """The Extension Patch Request object. + + :ivar auto_upgrade_minor_version: Flag to note if this extension participates in auto upgrade + of minor version, or not. + :vartype auto_upgrade_minor_version: bool + :ivar release_train: ReleaseTrain this extension participates in for auto-upgrade (e.g. Stable, + Preview, etc.) - only if autoUpgradeMinorVersion is 'true'. + :vartype release_train: str + :ivar version: Version of the extension for this extension, if it is 'pinned' to a specific + version. autoUpgradeMinorVersion must be 'false'. + :vartype version: str + :ivar configuration_settings: Configuration settings, as name-value pairs for configuring this + extension. + :vartype configuration_settings: dict[str, str] + :ivar configuration_protected_settings: Configuration settings that are sensitive, as + name-value pairs for configuring this extension. 
+ :vartype configuration_protected_settings: dict[str, str] + """ + + _attribute_map = { + "auto_upgrade_minor_version": {"key": "properties.autoUpgradeMinorVersion", "type": "bool"}, + "release_train": {"key": "properties.releaseTrain", "type": "str"}, + "version": {"key": "properties.version", "type": "str"}, + "configuration_settings": {"key": "properties.configurationSettings", "type": "{str}"}, + "configuration_protected_settings": {"key": "properties.configurationProtectedSettings", "type": "{str}"}, + } + + def __init__( + self, + *, + auto_upgrade_minor_version: bool = True, + release_train: str = "Stable", + version: Optional[str] = None, + configuration_settings: Optional[Dict[str, str]] = None, + configuration_protected_settings: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> None: + """ + :keyword auto_upgrade_minor_version: Flag to note if this extension participates in auto + upgrade of minor version, or not. + :paramtype auto_upgrade_minor_version: bool + :keyword release_train: ReleaseTrain this extension participates in for auto-upgrade (e.g. + Stable, Preview, etc.) - only if autoUpgradeMinorVersion is 'true'. + :paramtype release_train: str + :keyword version: Version of the extension for this extension, if it is 'pinned' to a specific + version. autoUpgradeMinorVersion must be 'false'. + :paramtype version: str + :keyword configuration_settings: Configuration settings, as name-value pairs for configuring + this extension. + :paramtype configuration_settings: dict[str, str] + :keyword configuration_protected_settings: Configuration settings that are sensitive, as + name-value pairs for configuring this extension. 
+ :paramtype configuration_protected_settings: dict[str, str] + """ + super().__init__(**kwargs) + self.auto_upgrade_minor_version = auto_upgrade_minor_version + self.release_train = release_train + self.version = version + self.configuration_settings = configuration_settings + self.configuration_protected_settings = configuration_protected_settings + + +class Plan(_serialization.Model): + """Plan for the resource. + + All required parameters must be populated in order to send to Azure. + + :ivar name: A user defined name of the 3rd Party Artifact that is being procured. Required. + :vartype name: str + :ivar publisher: The publisher of the 3rd Party Artifact that is being bought. E.g. NewRelic. + Required. + :vartype publisher: str + :ivar product: The 3rd Party artifact that is being procured. E.g. NewRelic. Product maps to + the OfferID specified for the artifact at the time of Data Market onboarding. Required. + :vartype product: str + :ivar promotion_code: A publisher provided promotion code as provisioned in Data Market for the + said product/artifact. + :vartype promotion_code: str + :ivar version: The version of the desired product/artifact. + :vartype version: str + """ + + _validation = { + "name": {"required": True}, + "publisher": {"required": True}, + "product": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "publisher": {"key": "publisher", "type": "str"}, + "product": {"key": "product", "type": "str"}, + "promotion_code": {"key": "promotionCode", "type": "str"}, + "version": {"key": "version", "type": "str"}, + } + + def __init__( + self, + *, + name: str, + publisher: str, + product: str, + promotion_code: Optional[str] = None, + version: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: A user defined name of the 3rd Party Artifact that is being procured. Required. + :paramtype name: str + :keyword publisher: The publisher of the 3rd Party Artifact that is being bought. E.g. 
+ NewRelic. Required. + :paramtype publisher: str + :keyword product: The 3rd Party artifact that is being procured. E.g. NewRelic. Product maps to + the OfferID specified for the artifact at the time of Data Market onboarding. Required. + :paramtype product: str + :keyword promotion_code: A publisher provided promotion code as provisioned in Data Market for + the said product/artifact. + :paramtype promotion_code: str + :keyword version: The version of the desired product/artifact. + :paramtype version: str + """ + super().__init__(**kwargs) + self.name = name + self.publisher = publisher + self.product = product + self.promotion_code = promotion_code + self.version = version + + +class PostBuildDefinition(_serialization.Model): + """The postBuild definitions defining variable substitutions for this Kustomization after + kustomize build. + + :ivar substitute: Key/value pairs holding the variables to be substituted in this + Kustomization. + :vartype substitute: dict[str, str] + :ivar substitute_from: Array of ConfigMaps/Secrets from which the variables are substituted for + this Kustomization. + :vartype substitute_from: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SubstituteFromDefinition] + """ + + _attribute_map = { + "substitute": {"key": "substitute", "type": "{str}"}, + "substitute_from": {"key": "substituteFrom", "type": "[SubstituteFromDefinition]"}, + } + + def __init__( + self, + *, + substitute: Optional[Dict[str, str]] = None, + substitute_from: Optional[List["_models.SubstituteFromDefinition"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword substitute: Key/value pairs holding the variables to be substituted in this + Kustomization. + :paramtype substitute: dict[str, str] + :keyword substitute_from: Array of ConfigMaps/Secrets from which the variables are substituted + for this Kustomization. 
+ :paramtype substitute_from: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SubstituteFromDefinition] + """ + super().__init__(**kwargs) + self.substitute = substitute + self.substitute_from = substitute_from + + +class RepositoryRefDefinition(_serialization.Model): + """The source reference for the GitRepository object. + + :ivar branch: The git repository branch name to checkout. + :vartype branch: str + :ivar tag: The git repository tag name to checkout. This takes precedence over branch. + :vartype tag: str + :ivar semver: The semver range used to match against git repository tags. This takes precedence + over tag. + :vartype semver: str + :ivar commit: The commit SHA to checkout. This value must be combined with the branch name to + be valid. This takes precedence over semver. + :vartype commit: str + """ + + _attribute_map = { + "branch": {"key": "branch", "type": "str"}, + "tag": {"key": "tag", "type": "str"}, + "semver": {"key": "semver", "type": "str"}, + "commit": {"key": "commit", "type": "str"}, + } + + def __init__( + self, + *, + branch: Optional[str] = None, + tag: Optional[str] = None, + semver: Optional[str] = None, + commit: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword branch: The git repository branch name to checkout. + :paramtype branch: str + :keyword tag: The git repository tag name to checkout. This takes precedence over branch. + :paramtype tag: str + :keyword semver: The semver range used to match against git repository tags. This takes + precedence over tag. + :paramtype semver: str + :keyword commit: The commit SHA to checkout. This value must be combined with the branch name + to be valid. This takes precedence over semver. + :paramtype commit: str + """ + super().__init__(**kwargs) + self.branch = branch + self.tag = tag + self.semver = semver + self.commit = commit + + +class ResourceProviderOperation(_serialization.Model): + """Supported operation of this resource provider. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Operation name, in format of {provider}/{resource}/{operation}. + :vartype name: str + :ivar display: Display metadata associated with the operation. + :vartype display: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ResourceProviderOperationDisplay + :ivar is_data_action: The flag that indicates whether the operation applies to data plane. + :vartype is_data_action: bool + :ivar origin: Origin of the operation. + :vartype origin: str + """ + + _validation = { + "is_data_action": {"readonly": True}, + "origin": {"readonly": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "display": {"key": "display", "type": "ResourceProviderOperationDisplay"}, + "is_data_action": {"key": "isDataAction", "type": "bool"}, + "origin": {"key": "origin", "type": "str"}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + display: Optional["_models.ResourceProviderOperationDisplay"] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: Operation name, in format of {provider}/{resource}/{operation}. + :paramtype name: str + :keyword display: Display metadata associated with the operation. + :paramtype display: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ResourceProviderOperationDisplay + """ + super().__init__(**kwargs) + self.name = name + self.display = display + self.is_data_action = None + self.origin = None + + +class ResourceProviderOperationDisplay(_serialization.Model): + """Display metadata associated with the operation. + + :ivar provider: Resource provider: Microsoft KubernetesConfiguration. + :vartype provider: str + :ivar resource: Resource on which the operation is performed. + :vartype resource: str + :ivar operation: Type of operation: get, read, delete, etc. + :vartype operation: str + :ivar description: Description of this operation. 
+ :vartype description: str + """ + + _attribute_map = { + "provider": {"key": "provider", "type": "str"}, + "resource": {"key": "resource", "type": "str"}, + "operation": {"key": "operation", "type": "str"}, + "description": {"key": "description", "type": "str"}, + } + + def __init__( + self, + *, + provider: Optional[str] = None, + resource: Optional[str] = None, + operation: Optional[str] = None, + description: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword provider: Resource provider: Microsoft KubernetesConfiguration. + :paramtype provider: str + :keyword resource: Resource on which the operation is performed. + :paramtype resource: str + :keyword operation: Type of operation: get, read, delete, etc. + :paramtype operation: str + :keyword description: Description of this operation. + :paramtype description: str + """ + super().__init__(**kwargs) + self.provider = provider + self.resource = resource + self.operation = operation + self.description = description + + +class ResourceProviderOperationList(_serialization.Model): + """Result of the request to list operations. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: List of operations supported by this resource provider. + :vartype value: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ResourceProviderOperation] + :ivar next_link: URL to the next set of results, if any. + :vartype next_link: str + """ + + _validation = { + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[ResourceProviderOperation]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: Optional[List["_models.ResourceProviderOperation"]] = None, **kwargs: Any) -> None: + """ + :keyword value: List of operations supported by this resource provider. 
+ :paramtype value: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ResourceProviderOperation] + """ + super().__init__(**kwargs) + self.value = value + self.next_link = None + + +class Scope(_serialization.Model): + """Scope of the extension. It can be either Cluster or Namespace; but not both. + + :ivar cluster: Specifies that the scope of the extension is Cluster. + :vartype cluster: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ScopeCluster + :ivar namespace: Specifies that the scope of the extension is Namespace. + :vartype namespace: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ScopeNamespace + """ + + _attribute_map = { + "cluster": {"key": "cluster", "type": "ScopeCluster"}, + "namespace": {"key": "namespace", "type": "ScopeNamespace"}, + } + + def __init__( + self, + *, + cluster: Optional["_models.ScopeCluster"] = None, + namespace: Optional["_models.ScopeNamespace"] = None, + **kwargs: Any + ) -> None: + """ + :keyword cluster: Specifies that the scope of the extension is Cluster. + :paramtype cluster: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ScopeCluster + :keyword namespace: Specifies that the scope of the extension is Namespace. + :paramtype namespace: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ScopeNamespace + """ + super().__init__(**kwargs) + self.cluster = cluster + self.namespace = namespace + + +class ScopeCluster(_serialization.Model): + """Specifies that the scope of the extension is Cluster. + + :ivar release_namespace: Namespace where the extension Release must be placed, for a Cluster + scoped extension. If this namespace does not exist, it will be created. 
+ :vartype release_namespace: str + """ + + _attribute_map = { + "release_namespace": {"key": "releaseNamespace", "type": "str"}, + } + + def __init__(self, *, release_namespace: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword release_namespace: Namespace where the extension Release must be placed, for a Cluster + scoped extension. If this namespace does not exist, it will be created. + :paramtype release_namespace: str + """ + super().__init__(**kwargs) + self.release_namespace = release_namespace + + +class ScopeNamespace(_serialization.Model): + """Specifies that the scope of the extension is Namespace. + + :ivar target_namespace: Namespace where the extension will be created for an Namespace scoped + extension. If this namespace does not exist, it will be created. + :vartype target_namespace: str + """ + + _attribute_map = { + "target_namespace": {"key": "targetNamespace", "type": "str"}, + } + + def __init__(self, *, target_namespace: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword target_namespace: Namespace where the extension will be created for an Namespace + scoped extension. If this namespace does not exist, it will be created. + :paramtype target_namespace: str + """ + super().__init__(**kwargs) + self.target_namespace = target_namespace + + +class ServicePrincipalDefinition(_serialization.Model): + """Parameters to authenticate using Service Principal. + + :ivar client_id: The client Id for authenticating a Service Principal. + :vartype client_id: str + :ivar tenant_id: The tenant Id for authenticating a Service Principal. + :vartype tenant_id: str + :ivar client_secret: The client secret for authenticating a Service Principal. + :vartype client_secret: str + :ivar client_certificate: Base64-encoded certificate used to authenticate a Service Principal. + :vartype client_certificate: str + :ivar client_certificate_password: The password for the certificate used to authenticate a + Service Principal. 
+ :vartype client_certificate_password: str + :ivar client_certificate_send_chain: Specifies whether to include x5c header in client claims + when acquiring a token to enable subject name / issuer based authentication for the Client + Certificate. + :vartype client_certificate_send_chain: bool + """ + + _attribute_map = { + "client_id": {"key": "clientId", "type": "str"}, + "tenant_id": {"key": "tenantId", "type": "str"}, + "client_secret": {"key": "clientSecret", "type": "str"}, + "client_certificate": {"key": "clientCertificate", "type": "str"}, + "client_certificate_password": {"key": "clientCertificatePassword", "type": "str"}, + "client_certificate_send_chain": {"key": "clientCertificateSendChain", "type": "bool"}, + } + + def __init__( + self, + *, + client_id: Optional[str] = None, + tenant_id: Optional[str] = None, + client_secret: Optional[str] = None, + client_certificate: Optional[str] = None, + client_certificate_password: Optional[str] = None, + client_certificate_send_chain: bool = False, + **kwargs: Any + ) -> None: + """ + :keyword client_id: The client Id for authenticating a Service Principal. + :paramtype client_id: str + :keyword tenant_id: The tenant Id for authenticating a Service Principal. + :paramtype tenant_id: str + :keyword client_secret: The client secret for authenticating a Service Principal. + :paramtype client_secret: str + :keyword client_certificate: Base64-encoded certificate used to authenticate a Service + Principal. + :paramtype client_certificate: str + :keyword client_certificate_password: The password for the certificate used to authenticate a + Service Principal. + :paramtype client_certificate_password: str + :keyword client_certificate_send_chain: Specifies whether to include x5c header in client + claims when acquiring a token to enable subject name / issuer based authentication for the + Client Certificate. 
+ :paramtype client_certificate_send_chain: bool + """ + super().__init__(**kwargs) + self.client_id = client_id + self.tenant_id = tenant_id + self.client_secret = client_secret + self.client_certificate = client_certificate + self.client_certificate_password = client_certificate_password + self.client_certificate_send_chain = client_certificate_send_chain + + +class ServicePrincipalPatchDefinition(_serialization.Model): + """Parameters to authenticate using Service Principal. + + :ivar client_id: The client Id for authenticating a Service Principal. + :vartype client_id: str + :ivar tenant_id: The tenant Id for authenticating a Service Principal. + :vartype tenant_id: str + :ivar client_secret: The client secret for authenticating a Service Principal. + :vartype client_secret: str + :ivar client_certificate: Base64-encoded certificate used to authenticate a Service Principal. + :vartype client_certificate: str + :ivar client_certificate_password: The password for the certificate used to authenticate a + Service Principal. + :vartype client_certificate_password: str + :ivar client_certificate_send_chain: Specifies whether to include x5c header in client claims + when acquiring a token to enable subject name / issuer based authentication for the Client + Certificate. 
+ :vartype client_certificate_send_chain: bool + """ + + _attribute_map = { + "client_id": {"key": "clientId", "type": "str"}, + "tenant_id": {"key": "tenantId", "type": "str"}, + "client_secret": {"key": "clientSecret", "type": "str"}, + "client_certificate": {"key": "clientCertificate", "type": "str"}, + "client_certificate_password": {"key": "clientCertificatePassword", "type": "str"}, + "client_certificate_send_chain": {"key": "clientCertificateSendChain", "type": "bool"}, + } + + def __init__( + self, + *, + client_id: Optional[str] = None, + tenant_id: Optional[str] = None, + client_secret: Optional[str] = None, + client_certificate: Optional[str] = None, + client_certificate_password: Optional[str] = None, + client_certificate_send_chain: Optional[bool] = None, + **kwargs: Any + ) -> None: + """ + :keyword client_id: The client Id for authenticating a Service Principal. + :paramtype client_id: str + :keyword tenant_id: The tenant Id for authenticating a Service Principal. + :paramtype tenant_id: str + :keyword client_secret: The client secret for authenticating a Service Principal. + :paramtype client_secret: str + :keyword client_certificate: Base64-encoded certificate used to authenticate a Service + Principal. + :paramtype client_certificate: str + :keyword client_certificate_password: The password for the certificate used to authenticate a + Service Principal. + :paramtype client_certificate_password: str + :keyword client_certificate_send_chain: Specifies whether to include x5c header in client + claims when acquiring a token to enable subject name / issuer based authentication for the + Client Certificate. 
+ :paramtype client_certificate_send_chain: bool + """ + super().__init__(**kwargs) + self.client_id = client_id + self.tenant_id = tenant_id + self.client_secret = client_secret + self.client_certificate = client_certificate + self.client_certificate_password = client_certificate_password + self.client_certificate_send_chain = client_certificate_send_chain + + +class SourceControlConfiguration(ProxyResource): # pylint: disable=too-many-instance-attributes + """The SourceControl Configuration object returned in Get & Put response. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Top level metadata + https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources. + :vartype system_data: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SystemData + :ivar repository_url: Url of the SourceControl Repository. + :vartype repository_url: str + :ivar operator_namespace: The namespace to which this operator is installed to. Maximum of 253 + lower case alphanumeric characters, hyphen and period only. + :vartype operator_namespace: str + :ivar operator_instance_name: Instance name of the operator - identifying the specific + configuration. + :vartype operator_instance_name: str + :ivar operator_type: Type of the operator. "Flux" + :vartype operator_type: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperatorType + :ivar operator_params: Any Parameters for the Operator instance in string format. 
+ :vartype operator_params: str + :ivar configuration_protected_settings: Name-value pairs of protected configuration settings + for the configuration. + :vartype configuration_protected_settings: dict[str, str] + :ivar operator_scope: Scope at which the operator will be installed. Known values are: + "cluster" and "namespace". + :vartype operator_scope: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperatorScopeType + :ivar repository_public_key: Public Key associated with this SourceControl configuration + (either generated within the cluster or provided by the user). + :vartype repository_public_key: str + :ivar ssh_known_hosts_contents: Base64-encoded known_hosts contents containing public SSH keys + required to access private Git instances. + :vartype ssh_known_hosts_contents: str + :ivar enable_helm_operator: Option to enable Helm Operator for this git configuration. + :vartype enable_helm_operator: bool + :ivar helm_operator_properties: Properties for Helm operator. + :vartype helm_operator_properties: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.HelmOperatorProperties + :ivar provisioning_state: The provisioning state of the resource provider. Known values are: + "Accepted", "Deleting", "Running", "Succeeded", and "Failed". + :vartype provisioning_state: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ProvisioningStateType + :ivar compliance_status: Compliance Status of the Configuration. 
+ :vartype compliance_status: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ComplianceStatus + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + "repository_public_key": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "compliance_status": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "repository_url": {"key": "properties.repositoryUrl", "type": "str"}, + "operator_namespace": {"key": "properties.operatorNamespace", "type": "str"}, + "operator_instance_name": {"key": "properties.operatorInstanceName", "type": "str"}, + "operator_type": {"key": "properties.operatorType", "type": "str"}, + "operator_params": {"key": "properties.operatorParams", "type": "str"}, + "configuration_protected_settings": {"key": "properties.configurationProtectedSettings", "type": "{str}"}, + "operator_scope": {"key": "properties.operatorScope", "type": "str"}, + "repository_public_key": {"key": "properties.repositoryPublicKey", "type": "str"}, + "ssh_known_hosts_contents": {"key": "properties.sshKnownHostsContents", "type": "str"}, + "enable_helm_operator": {"key": "properties.enableHelmOperator", "type": "bool"}, + "helm_operator_properties": {"key": "properties.helmOperatorProperties", "type": "HelmOperatorProperties"}, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "compliance_status": {"key": "properties.complianceStatus", "type": "ComplianceStatus"}, + } + + def __init__( + self, + *, + repository_url: Optional[str] = None, + operator_namespace: str = "default", + operator_instance_name: Optional[str] = None, + operator_type: Optional[Union[str, "_models.OperatorType"]] = None, + operator_params: Optional[str] = None, + 
configuration_protected_settings: Optional[Dict[str, str]] = None, + operator_scope: Union[str, "_models.OperatorScopeType"] = "cluster", + ssh_known_hosts_contents: Optional[str] = None, + enable_helm_operator: Optional[bool] = None, + helm_operator_properties: Optional["_models.HelmOperatorProperties"] = None, + **kwargs: Any + ) -> None: + """ + :keyword repository_url: Url of the SourceControl Repository. + :paramtype repository_url: str + :keyword operator_namespace: The namespace to which this operator is installed to. Maximum of + 253 lower case alphanumeric characters, hyphen and period only. + :paramtype operator_namespace: str + :keyword operator_instance_name: Instance name of the operator - identifying the specific + configuration. + :paramtype operator_instance_name: str + :keyword operator_type: Type of the operator. "Flux" + :paramtype operator_type: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperatorType + :keyword operator_params: Any Parameters for the Operator instance in string format. + :paramtype operator_params: str + :keyword configuration_protected_settings: Name-value pairs of protected configuration settings + for the configuration. + :paramtype configuration_protected_settings: dict[str, str] + :keyword operator_scope: Scope at which the operator will be installed. Known values are: + "cluster" and "namespace". + :paramtype operator_scope: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperatorScopeType + :keyword ssh_known_hosts_contents: Base64-encoded known_hosts contents containing public SSH + keys required to access private Git instances. + :paramtype ssh_known_hosts_contents: str + :keyword enable_helm_operator: Option to enable Helm Operator for this git configuration. + :paramtype enable_helm_operator: bool + :keyword helm_operator_properties: Properties for Helm operator. 
+ :paramtype helm_operator_properties: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.HelmOperatorProperties + """ + super().__init__(**kwargs) + self.system_data = None + self.repository_url = repository_url + self.operator_namespace = operator_namespace + self.operator_instance_name = operator_instance_name + self.operator_type = operator_type + self.operator_params = operator_params + self.configuration_protected_settings = configuration_protected_settings + self.operator_scope = operator_scope + self.repository_public_key = None + self.ssh_known_hosts_contents = ssh_known_hosts_contents + self.enable_helm_operator = enable_helm_operator + self.helm_operator_properties = helm_operator_properties + self.provisioning_state = None + self.compliance_status = None + + +class SourceControlConfigurationList(_serialization.Model): + """Result of the request to list Source Control Configurations. It contains a list of + SourceControlConfiguration objects and a URL link to get the next set of results. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar value: List of Source Control Configurations within a Kubernetes cluster. + :vartype value: + list[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration] + :ivar next_link: URL to get the next set of configuration objects, if any. + :vartype next_link: str + """ + + _validation = { + "value": {"readonly": True}, + "next_link": {"readonly": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[SourceControlConfiguration]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.value = None + self.next_link = None + + +class SubstituteFromDefinition(_serialization.Model): + """Array of ConfigMaps/Secrets from which the variables are substituted for this Kustomization. 
+ + :ivar kind: Define whether it is ConfigMap or Secret that holds the variables to be used in + substitution. + :vartype kind: str + :ivar name: Name of the ConfigMap/Secret that holds the variables to be used in substitution. + :vartype name: str + :ivar optional: Set to True to proceed without ConfigMap/Secret, if it is not present. + :vartype optional: bool + """ + + _attribute_map = { + "kind": {"key": "kind", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "optional": {"key": "optional", "type": "bool"}, + } + + def __init__( + self, *, kind: Optional[str] = None, name: Optional[str] = None, optional: bool = False, **kwargs: Any + ) -> None: + """ + :keyword kind: Define whether it is ConfigMap or Secret that holds the variables to be used in + substitution. + :paramtype kind: str + :keyword name: Name of the ConfigMap/Secret that holds the variables to be used in + substitution. + :paramtype name: str + :keyword optional: Set to True to proceed without ConfigMap/Secret, if it is not present. + :paramtype optional: bool + """ + super().__init__(**kwargs) + self.kind = kind + self.name = name + self.optional = optional + + +class SystemData(_serialization.Model): + """Metadata pertaining to creation and last modification of the resource. + + :ivar created_by: The identity that created the resource. + :vartype created_by: str + :ivar created_by_type: The type of identity that created the resource. Known values are: + "User", "Application", "ManagedIdentity", and "Key". + :vartype created_by_type: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.CreatedByType + :ivar created_at: The timestamp of resource creation (UTC). + :vartype created_at: ~datetime.datetime + :ivar last_modified_by: The identity that last modified the resource. + :vartype last_modified_by: str + :ivar last_modified_by_type: The type of identity that last modified the resource. Known values + are: "User", "Application", "ManagedIdentity", and "Key". 
+ :vartype last_modified_by_type: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.CreatedByType + :ivar last_modified_at: The timestamp of resource last modification (UTC). + :vartype last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + "created_by": {"key": "createdBy", "type": "str"}, + "created_by_type": {"key": "createdByType", "type": "str"}, + "created_at": {"key": "createdAt", "type": "iso-8601"}, + "last_modified_by": {"key": "lastModifiedBy", "type": "str"}, + "last_modified_by_type": {"key": "lastModifiedByType", "type": "str"}, + "last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"}, + } + + def __init__( + self, + *, + created_by: Optional[str] = None, + created_by_type: Optional[Union[str, "_models.CreatedByType"]] = None, + created_at: Optional[datetime.datetime] = None, + last_modified_by: Optional[str] = None, + last_modified_by_type: Optional[Union[str, "_models.CreatedByType"]] = None, + last_modified_at: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """ + :keyword created_by: The identity that created the resource. + :paramtype created_by: str + :keyword created_by_type: The type of identity that created the resource. Known values are: + "User", "Application", "ManagedIdentity", and "Key". + :paramtype created_by_type: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.CreatedByType + :keyword created_at: The timestamp of resource creation (UTC). + :paramtype created_at: ~datetime.datetime + :keyword last_modified_by: The identity that last modified the resource. + :paramtype last_modified_by: str + :keyword last_modified_by_type: The type of identity that last modified the resource. Known + values are: "User", "Application", "ManagedIdentity", and "Key". + :paramtype last_modified_by_type: str or + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.CreatedByType + :keyword last_modified_at: The timestamp of resource last modification (UTC). 
+ :paramtype last_modified_at: ~datetime.datetime + """ + super().__init__(**kwargs) + self.created_by = created_by + self.created_by_type = created_by_type + self.created_at = created_at + self.last_modified_by = last_modified_by + self.last_modified_by_type = last_modified_by_type + self.last_modified_at = last_modified_at diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_source_control_configuration_client_enums.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_source_control_configuration_client_enums.py new file mode 100644 index 00000000000..cf097984794 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/models/_source_control_configuration_client_enums.py @@ -0,0 +1,121 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AKSIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The identity type.""" + + SYSTEM_ASSIGNED = "SystemAssigned" + USER_ASSIGNED = "UserAssigned" + + +class ComplianceStateType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The compliance state of the configuration.""" + + PENDING = "Pending" + COMPLIANT = "Compliant" + NONCOMPLIANT = "Noncompliant" + INSTALLED = "Installed" + FAILED = "Failed" + + +class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of identity that created the resource.""" + + USER = "User" + APPLICATION = "Application" + MANAGED_IDENTITY = "ManagedIdentity" + KEY = "Key" + + +class FluxComplianceState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Compliance state of the cluster object.""" + + COMPLIANT = "Compliant" + NON_COMPLIANT = "Non-Compliant" + PENDING = "Pending" + SUSPENDED = "Suspended" + UNKNOWN = "Unknown" + + +class KustomizationValidationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specify whether to validate the Kubernetes objects referenced in the Kustomization before + applying them to the cluster. 
+ """ + + NONE = "none" + CLIENT = "client" + SERVER = "server" + + +class LevelType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Level of the status.""" + + ERROR = "Error" + WARNING = "Warning" + INFORMATION = "Information" + + +class MessageLevelType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Level of the message.""" + + ERROR = "Error" + WARNING = "Warning" + INFORMATION = "Information" + + +class OperatorScopeType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Scope at which the operator will be installed.""" + + CLUSTER = "cluster" + NAMESPACE = "namespace" + + +class OperatorType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of the operator.""" + + FLUX = "Flux" + + +class ProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The provisioning state of the resource.""" + + SUCCEEDED = "Succeeded" + FAILED = "Failed" + CANCELED = "Canceled" + CREATING = "Creating" + UPDATING = "Updating" + DELETING = "Deleting" + + +class ProvisioningStateType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The provisioning state of the resource provider.""" + + ACCEPTED = "Accepted" + DELETING = "Deleting" + RUNNING = "Running" + SUCCEEDED = "Succeeded" + FAILED = "Failed" + + +class ScopeType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Scope at which the configuration will be installed.""" + + CLUSTER = "cluster" + NAMESPACE = "namespace" + + +class SourceKindType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Source Kind to pull the configuration data from.""" + + GIT_REPOSITORY = "GitRepository" + BUCKET = "Bucket" + AZURE_BLOB = "AzureBlob" diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/__init__.py new file mode 100644 index 00000000000..9d58b5443a0 --- /dev/null +++ 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._extensions_operations import ExtensionsOperations +from ._operation_status_operations import OperationStatusOperations +from ._flux_configurations_operations import FluxConfigurationsOperations +from ._flux_config_operation_status_operations import FluxConfigOperationStatusOperations +from ._source_control_configurations_operations import SourceControlConfigurationsOperations +from ._operations import Operations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ExtensionsOperations", + "OperationStatusOperations", + "FluxConfigurationsOperations", + "FluxConfigOperationStatusOperations", + "SourceControlConfigurationsOperations", + "Operations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_extensions_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_extensions_operations.py new file mode 100644 index 00000000000..824b299a08b --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_extensions_operations.py @@ -0,0 +1,1152 
@@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. 
import models as _models +from ..._serialization import Serializer +from .._vendor import _convert_request + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_create_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "extensionName": _SERIALIZER.url("extension_name", extension_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + 
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "extensionName": _SERIALIZER.url("extension_name", extension_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: 
str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + subscription_id: str, + *, + force_delete: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "extensionName": _SERIALIZER.url("extension_name", extension_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if force_delete is not None: + _params["forceDelete"] = _SERIALIZER.query("force_delete", force_delete, "bool") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_update_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + subscription_id: str, + **kwargs: 
Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "extensionName": _SERIALIZER.url("extension_name", extension_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class ExtensionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.SourceControlConfigurationClient`'s + :attr:`extensions` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + def _create_initial( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + extension: Union[_models.Extension, IO], + **kwargs: Any + ) -> _models.Extension: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(extension, (IOBase, bytes)): + _content = extension + else: + _json = self._serialize.body(extension, "Extension") + + request = build_create_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_initial.metadata["url"], + headers=_headers, + params=_params, + 
) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Extension", pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize("Extension", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @overload + def begin_create( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + extension: _models.Extension, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Extension]: + """Create a new Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. 
+ :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param extension: Properties necessary to Create an Extension. Required. + :type extension: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Extension or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + extension: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Extension]: + """Create a new Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. 
+ :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param extension: Properties necessary to Create an Extension. Required. + :type extension: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Extension or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + extension: Union[_models.Extension, IO], + **kwargs: Any + ) -> LROPoller[_models.Extension]: + """Create a new Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param extension: Properties necessary to Create an Extension. Is either a Extension type or a + IO type. Required. + :type extension: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either Extension or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_initial( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + extension=extension, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Extension", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) 
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @distributed_trace + def get( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + **kwargs: Any + ) -> _models.Extension: + """Gets Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. 
+ :type extension_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Extension or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + + request = build_get_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Extension", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + def _delete_initial( # pylint: disable=inconsistent-return-statements + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + force_delete: Optional[bool] = None, + **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_delete_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + subscription_id=self._config.subscription_id, + force_delete=force_delete, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if cls: + return 
cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @distributed_trace + def begin_delete( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + force_delete: Optional[bool] = None, + **kwargs: Any + ) -> LROPoller[None]: + """Delete a Kubernetes Cluster Extension. This will cause the Agent to Uninstall the extension + from the cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param force_delete: Delete the extension resource in Azure - not the normal asynchronous + delete. Default value is None. + :type force_delete: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. 
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + force_delete=force_delete, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, 
get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + def _update_initial( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + patch_extension: Union[_models.PatchExtension, IO], + **kwargs: Any + ) -> _models.Extension: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(patch_extension, (IOBase, bytes)): + _content = patch_extension + else: + _json = self._serialize.body(patch_extension, "PatchExtension") + + request = build_update_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = 
self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Extension", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Extension", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @overload + def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + patch_extension: _models.PatchExtension, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Extension]: + """Patch an existing Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. 
+ :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param patch_extension: Properties to Patch in an existing Extension. Required. + :type patch_extension: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.PatchExtension + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Extension or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + patch_extension: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Extension]: + """Patch an existing Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. 
+ :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param patch_extension: Properties to Patch in an existing Extension. Required. + :type patch_extension: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Extension or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + patch_extension: Union[_models.PatchExtension, IO], + **kwargs: Any + ) -> LROPoller[_models.Extension]: + """Patch an existing Kubernetes Cluster Extension. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param patch_extension: Properties to Patch in an existing Extension. Is either a + PatchExtension type or a IO type. Required. + :type patch_extension: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.PatchExtension or + IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either Extension or the result of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Extension] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + patch_extension=patch_extension, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Extension", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + 
deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}" + } + + @distributed_trace + def list( + self, resource_group_name: str, cluster_rp: str, cluster_resource_name: str, cluster_name: str, **kwargs: Any + ) -> Iterable["_models.Extension"]: + """List all Extensions in the cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. 
+ :type cluster_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Extension or the result of cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.Extension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.ExtensionsList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_list_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def 
extract_data(pipeline_response): + deserialized = self._deserialize("ExtensionsList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions" + } diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_flux_config_operation_status_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_flux_config_operation_status_operations.py new file mode 100644 index 00000000000..19efb8bf2ce --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_flux_config_operation_status_operations.py @@ -0,0 +1,185 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Optional, TypeVar + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models as _models +from ..._serialization import Serializer +from .._vendor import _convert_request + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_get_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + operation_id: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}/operations/{operationId}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": 
_SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "fluxConfigurationName": _SERIALIZER.url("flux_configuration_name", flux_configuration_name, "str"), + "operationId": _SERIALIZER.url("operation_id", operation_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class FluxConfigOperationStatusOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.SourceControlConfigurationClient`'s + :attr:`flux_config_operation_status` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def get( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + operation_id: str, + **kwargs: Any + ) -> _models.OperationStatusResult: + """Get Async Operation status. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param operation_id: operation Id. Required. 
+ :type operation_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: OperationStatusResult or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperationStatusResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.OperationStatusResult] = kwargs.pop("cls", None) + + request = build_get_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + operation_id=operation_id, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("OperationStatusResult", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return 
deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}/operations/{operationId}" + } diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_flux_configurations_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_flux_configurations_operations.py new file mode 100644 index 00000000000..96b66f0e377 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_flux_configurations_operations.py @@ -0,0 +1,1163 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. import models as _models +from ..._serialization import Serializer +from .._vendor import _convert_request + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_get_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}", + ) # pylint: 
disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "fluxConfigurationName": _SERIALIZER.url("flux_configuration_name", flux_configuration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_or_update_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", 
min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "fluxConfigurationName": _SERIALIZER.url("flux_configuration_name", flux_configuration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_update_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + 
"resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "fluxConfigurationName": _SERIALIZER.url("flux_configuration_name", flux_configuration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + subscription_id: str, + *, + force_delete: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", 
resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "fluxConfigurationName": _SERIALIZER.url("flux_configuration_name", flux_configuration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if force_delete is not None: + _params["forceDelete"] = _SERIALIZER.query("force_delete", force_delete, "bool") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": 
_SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class FluxConfigurationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.SourceControlConfigurationClient`'s + :attr:`flux_configurations` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def get( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + **kwargs: Any + ) -> _models.FluxConfiguration: + """Gets details of the Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. 
managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FluxConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + + request = build_get_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, 
pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + def _create_or_update_initial( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration: Union[_models.FluxConfiguration, IO], + **kwargs: Any + ) -> _models.FluxConfiguration: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(flux_configuration, (IOBase, bytes)): + _content = flux_configuration + else: + _json = self._serialize.body(flux_configuration, "FluxConfiguration") + + request = build_create_or_update_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + 
content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration: _models.FluxConfiguration, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.FluxConfiguration]: + """Create a new Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. 
Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration: Properties necessary to Create a FluxConfiguration. Required. + :type flux_configuration: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.FluxConfiguration]: + """Create a new Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration: Properties necessary to Create a FluxConfiguration. Required. + :type flux_configuration: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. 
Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration: Union[_models.FluxConfiguration, IO], + **kwargs: Any + ) -> LROPoller[_models.FluxConfiguration]: + """Create a new Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration: Properties necessary to Create a FluxConfiguration. Is either a + FluxConfiguration type or a IO type. Required. + :type flux_configuration: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration or IO + :keyword content_type: Body Parameter content-type. 
Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + flux_configuration=flux_configuration, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + 
params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + def _update_initial( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration_patch: Union[_models.FluxConfigurationPatch, IO], + **kwargs: Any + ) -> _models.FluxConfiguration: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: 
ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(flux_configuration_patch, (IOBase, bytes)): + _content = flux_configuration_patch + else: + _json = self._serialize.body(flux_configuration_patch, "FluxConfigurationPatch") + + request = build_update_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + @overload + def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration_patch: _models.FluxConfigurationPatch, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.FluxConfiguration]: + """Update an existing Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration_patch: Properties to Patch in an existing Flux Configuration. + Required. + :type flux_configuration_patch: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfigurationPatch + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. 
Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration_patch: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.FluxConfiguration]: + """Update an existing Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param flux_configuration_patch: Properties to Patch in an existing Flux Configuration. + Required. + :type flux_configuration_patch: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + flux_configuration_patch: Union[_models.FluxConfigurationPatch, IO], + **kwargs: Any + ) -> LROPoller[_models.FluxConfiguration]: + """Update an existing Kubernetes Flux Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. 
+ :type flux_configuration_name: str + :param flux_configuration_patch: Properties to Patch in an existing Flux Configuration. Is + either a FluxConfigurationPatch type or a IO type. Required. + :type flux_configuration_patch: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfigurationPatch or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either FluxConfiguration or the result of + cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FluxConfiguration] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + flux_configuration_patch=flux_configuration_patch, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("FluxConfiguration", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + 
continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + def _delete_initial( # pylint: disable=inconsistent-return-statements + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + force_delete: Optional[bool] = None, + **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_delete_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + subscription_id=self._config.subscription_id, + force_delete=force_delete, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 
204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + @distributed_trace + def begin_delete( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + flux_configuration_name: str, + force_delete: Optional[bool] = None, + **kwargs: Any + ) -> LROPoller[None]: + """This will delete the YAML file used to set up the Flux Configuration, thus stopping future sync + from the source repo. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param flux_configuration_name: Name of the Flux Configuration. Required. + :type flux_configuration_name: str + :param force_delete: Delete the extension resource in Azure - not the normal asynchronous + delete. Default value is None. 
+ :type force_delete: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + flux_configuration_name=flux_configuration_name, + force_delete=force_delete, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, 
lro_options={"final-state-via": "azure-async-operation"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}" + } + + @distributed_trace + def list( + self, resource_group_name: str, cluster_rp: str, cluster_resource_name: str, cluster_name: str, **kwargs: Any + ) -> Iterable["_models.FluxConfiguration"]: + """List all Flux Configurations. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. 
+ :type cluster_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either FluxConfiguration or the result of cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.FluxConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.FluxConfigurationsList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_list_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + 
def extract_data(pipeline_response): + deserialized = self._deserialize("FluxConfigurationsList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations" + } diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_operation_status_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_operation_status_operations.py new file mode 100644 index 00000000000..433206bc99c --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_operation_status_operations.py @@ -0,0 +1,329 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Iterable, Optional, TypeVar +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models as _models +from ..._serialization import Serializer +from .._vendor import _convert_request + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_get_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + operation_id: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}/operations/{operationId}", + ) # pylint: disable=line-too-long + 
path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "extensionName": _SERIALIZER.url("extension_name", extension_name, "str"), + "operationId": _SERIALIZER.url("operation_id", operation_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/operations", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": 
_SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class OperationStatusOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.SourceControlConfigurationClient`'s + :attr:`operation_status` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def get( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + extension_name: str, + operation_id: str, + **kwargs: Any + ) -> _models.OperationStatusResult: + """Get Async Operation status. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. 
+ :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param extension_name: Name of the Extension. Required. + :type extension_name: str + :param operation_id: operation Id. Required. + :type operation_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: OperationStatusResult or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperationStatusResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.OperationStatusResult] = kwargs.pop("cls", None) + + request = build_get_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + extension_name=extension_name, + operation_id=operation_id, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("OperationStatusResult", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/extensions/{extensionName}/operations/{operationId}" + } + + @distributed_trace + def list( + self, resource_group_name: str, cluster_rp: str, cluster_resource_name: str, cluster_name: str, **kwargs: Any + ) -> Iterable["_models.OperationStatusResult"]: + """List Async Operations, currently in progress, in a cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. 
+ :type cluster_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either OperationStatusResult or the result of + cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.OperationStatusResult] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.OperationStatusList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_list_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return 
request + + def extract_data(pipeline_response): + deserialized = self._deserialize("OperationStatusList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/operations" + } diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_operations.py new file mode 100644 index 00000000000..d663549b87d --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_operations.py @@ -0,0 +1,158 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Iterable, Optional, TypeVar +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models as _models +from ..._serialization import Serializer +from .._vendor import _convert_request + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop("template_url", "/providers/Microsoft.KubernetesConfiguration/operations") + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class Operations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
+ + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.SourceControlConfigurationClient`'s + :attr:`operations` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def list(self, **kwargs: Any) -> Iterable["_models.ResourceProviderOperation"]: + """List all the available operations the KubernetesConfiguration resource provider supports. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either ResourceProviderOperation or the result of + cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.ResourceProviderOperation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.ResourceProviderOperationList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_list_request( + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = 
_convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("ResourceProviderOperationList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list.metadata = {"url": "/providers/Microsoft.KubernetesConfiguration/operations"} diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_patch.py new 
file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_source_control_configurations_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_source_control_configurations_operations.py new file mode 100644 index 00000000000..cbbb2b78173 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/operations/_source_control_configurations_operations.py @@ -0,0 +1,747 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. import models as _models +from ..._serialization import Serializer +from .._vendor import _convert_request + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_get_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}", + ) 
# pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "sourceControlConfigurationName": _SERIALIZER.url( + "source_control_configuration_name", source_control_configuration_name, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_or_update_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + 
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "sourceControlConfigurationName": _SERIALIZER.url( + "source_control_configuration_name", source_control_configuration_name, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", 
subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": _SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + "sourceControlConfigurationName": _SERIALIZER.url( + "source_control_configuration_name", source_control_configuration_name, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_request( + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "clusterRp": _SERIALIZER.url("cluster_rp", cluster_rp, "str"), + "clusterResourceName": 
_SERIALIZER.url("cluster_resource_name", cluster_resource_name, "str"), + "clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class SourceControlConfigurationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.kubernetesconfiguration.v2023_05_01.SourceControlConfigurationClient`'s + :attr:`source_control_configurations` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def get( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + **kwargs: Any + ) -> _models.SourceControlConfiguration: + """Gets details of the Source Control Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. 
+ :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. + :type source_control_configuration_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SourceControlConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.SourceControlConfiguration] = kwargs.pop("cls", None) + + request = build_get_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + source_control_configuration_name=source_control_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in 
[200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("SourceControlConfiguration", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}" + } + + @overload + def create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + source_control_configuration: _models.SourceControlConfiguration, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.SourceControlConfiguration: + """Create a new Kubernetes Source Control Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. + :type source_control_configuration_name: str + :param source_control_configuration: Properties necessary to Create KubernetesConfiguration. + Required. 
+ :type source_control_configuration: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SourceControlConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + source_control_configuration: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.SourceControlConfiguration: + """Create a new Kubernetes Source Control Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. + :type source_control_configuration_name: str + :param source_control_configuration: Properties necessary to Create KubernetesConfiguration. + Required. + :type source_control_configuration: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SourceControlConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_update( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + source_control_configuration: Union[_models.SourceControlConfiguration, IO], + **kwargs: Any + ) -> _models.SourceControlConfiguration: + """Create a new Kubernetes Source Control Configuration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. + :type source_control_configuration_name: str + :param source_control_configuration: Properties necessary to Create KubernetesConfiguration. Is + either a SourceControlConfiguration type or a IO type. Required. + :type source_control_configuration: + ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. 
+ :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SourceControlConfiguration or the result of cls(response) + :rtype: ~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SourceControlConfiguration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(source_control_configuration, (IOBase, bytes)): + _content = source_control_configuration + else: + _json = self._serialize.body(source_control_configuration, "SourceControlConfiguration") + + request = build_create_or_update_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + source_control_configuration_name=source_control_configuration_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self.create_or_update.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + 
) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("SourceControlConfiguration", pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize("SourceControlConfiguration", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}" + } + + def _delete_initial( # pylint: disable=inconsistent-return-statements + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_delete_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + source_control_configuration_name=source_control_configuration_name, + 
subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}" + } + + @distributed_trace + def begin_delete( + self, + resource_group_name: str, + cluster_rp: str, + cluster_resource_name: str, + cluster_name: str, + source_control_configuration_name: str, + **kwargs: Any + ) -> LROPoller[None]: + """This will delete the YAML file used to set up the Source control configuration, thus stopping + future sync from the source repo. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. 
+ :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. + :type cluster_name: str + :param source_control_configuration_name: Name of the Source Control Configuration. Required. + :type source_control_configuration_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + source_control_configuration_name=source_control_configuration_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def 
get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}" + } + + @distributed_trace + def list( + self, resource_group_name: str, cluster_rp: str, cluster_resource_name: str, cluster_name: str, **kwargs: Any + ) -> Iterable["_models.SourceControlConfiguration"]: + """List all Source Control Configurations. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param cluster_rp: The Kubernetes cluster RP - i.e. Microsoft.ContainerService, + Microsoft.Kubernetes, Microsoft.HybridContainerService. Required. + :type cluster_rp: str + :param cluster_resource_name: The Kubernetes cluster resource name - i.e. managedClusters, + connectedClusters, provisionedClusters. Required. + :type cluster_resource_name: str + :param cluster_name: The name of the kubernetes cluster. Required. 
+ :type cluster_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either SourceControlConfiguration or the result of + cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.kubernetesconfiguration.v2023_05_01.models.SourceControlConfiguration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-05-01")) + cls: ClsType[_models.SourceControlConfigurationList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_list_request( + resource_group_name=resource_group_name, + cluster_rp=cluster_rp, + cluster_resource_name=cluster_resource_name, + cluster_name=cluster_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method 
= "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("SourceControlConfigurationList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations" + } diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/py.typed b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/py.typed new file mode 100644 index 00000000000..e5aff4f83af --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_kubernetesconfiguration/v2023_05_01/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. 
\ No newline at end of file From 098a6ce0f705c231e2569b3b6229fb0bda37e18c Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Fri, 6 Feb 2026 12:34:52 +0530 Subject: [PATCH 06/24] Initial cut with baseline testing Signed-off-by: Anshul Ahuja --- src/aks-preview/azext_aks_preview/_params.py | 14 +- .../managed_cluster_decorator.py | 32 +- .../azext_dataprotection/manual/_consts.py | 20 + .../azext_dataprotection/manual/_params.py | 19 +- .../manual/aks/aks_helper.py | 1192 ++++++++++++++--- .../azext_dataprotection/manual/commands.py | 4 +- .../azext_dataprotection/manual/custom.py | 78 +- .../azext_dataprotection/manual/enums.py | 34 +- 8 files changed, 1145 insertions(+), 248 deletions(-) create mode 100644 src/dataprotection/azext_dataprotection/manual/_consts.py diff --git a/src/aks-preview/azext_aks_preview/_params.py b/src/aks-preview/azext_aks_preview/_params.py index 5bff793ab77..1e1c5f86d0b 100644 --- a/src/aks-preview/azext_aks_preview/_params.py +++ b/src/aks-preview/azext_aks_preview/_params.py @@ -24,10 +24,20 @@ validate_nat_gateway_managed_outbound_ip_count, ) -from azext_dataprotection.manual.enums import ( - backup_presets +# Import backup strategy constants from dataprotection extension +from azure.cli.core.extension.operations import add_extension_to_path +add_extension_to_path("dataprotection") +from azext_dataprotection.manual._consts import ( + CONST_AKS_BACKUP_STRATEGIES, + CONST_BACKUP_STRATEGY_WEEK, + CONST_BACKUP_STRATEGY_MONTH, + CONST_BACKUP_STRATEGY_IMMUTABLE, + CONST_BACKUP_STRATEGY_DISASTER_RECOVERY, + CONST_BACKUP_STRATEGY_CUSTOM, ) +backup_presets = CONST_AKS_BACKUP_STRATEGIES + from azure.cli.core.commands.parameters import ( edge_zone_type, file_type, diff --git a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py index db0d762aeb3..6094fd7f527 100644 --- a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py +++ 
b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py @@ -7383,22 +7383,28 @@ def set_up_backup(self, mc: ManagedCluster) -> ManagedCluster: enable_backup = self.context.raw_param.get("enable_backup") if enable_backup: - from azext_dataprotection.manual.enums import backup_presets + # Validate that dataprotection extension is installed + try: + from azure.cli.core.extension.operations import add_extension_to_path + add_extension_to_path("dataprotection") + from azext_dataprotection.manual.aks.aks_helper import dataprotection_enable_backup_helper + except (ImportError, ModuleNotFoundError): + raise CLIError( + "The 'dataprotection' extension is required for AKS backup functionality.\n" + "Please install it using: az extension add --name dataprotection" + ) backup_strategy = self.context.raw_param.get("backup_strategy") backup_configuration_parameters = self.context.raw_param.get("backup_configuration_parameters") - from msrestazure.tools import resource_id - - cluster_resource_id = resource_id( - subscription=self.context.get_subscription_id(), - resource_group=self.context.get_resource_group_name(), - namespace="Microsoft.ContainerService", - type="managedClusters", - name=self.context.get_name(), - ) - - from azext_dataprotection.manual.aks.aks_helper import dataprotection_enable_backup_helper - dataprotection_enable_backup_helper(str(cluster_resource_id), json.dumps(backup_strategy), json.dumps(backup_configuration_parameters)) + + # Build the cluster resource ID + cluster_resource_id = ( + f"/subscriptions/{self.context.get_subscription_id()}" + f"/resourceGroups/{self.context.get_resource_group_name()}" + f"/providers/Microsoft.ContainerService/managedClusters/{self.context.get_name()}" + ) + + dataprotection_enable_backup_helper(self.cmd, str(cluster_resource_id), backup_strategy, backup_configuration_parameters) return mc def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool: diff --git 
a/src/dataprotection/azext_dataprotection/manual/_consts.py b/src/dataprotection/azext_dataprotection/manual/_consts.py new file mode 100644 index 00000000000..9944212ff33 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/manual/_consts.py @@ -0,0 +1,20 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +# AKS Backup Strategy Constants +CONST_BACKUP_STRATEGY_WEEK = "Week" +CONST_BACKUP_STRATEGY_MONTH = "Month" +CONST_BACKUP_STRATEGY_IMMUTABLE = "Immutable" +CONST_BACKUP_STRATEGY_DISASTER_RECOVERY = "DisasterRecovery" +CONST_BACKUP_STRATEGY_CUSTOM = "Custom" + +# List of all backup strategies for AKS +CONST_AKS_BACKUP_STRATEGIES = [ + CONST_BACKUP_STRATEGY_WEEK, + CONST_BACKUP_STRATEGY_MONTH, + CONST_BACKUP_STRATEGY_IMMUTABLE, + CONST_BACKUP_STRATEGY_DISASTER_RECOVERY, + CONST_BACKUP_STRATEGY_CUSTOM, +] diff --git a/src/dataprotection/azext_dataprotection/manual/_params.py b/src/dataprotection/azext_dataprotection/manual/_params.py index 19899c22834..7945be7eadb 100644 --- a/src/dataprotection/azext_dataprotection/manual/_params.py +++ b/src/dataprotection/azext_dataprotection/manual/_params.py @@ -43,7 +43,7 @@ get_resource_type_values, get_persistent_volume_restore_mode_values, get_conflict_policy_values, - backup_presets, + get_all_backup_strategies, ) vault_name_type = CLIArgumentType(help='Name of the backup vault.', options_list=['--vault-name', '-v'], type=str) @@ -183,11 +183,18 @@ def load_arguments(self, _): c.argument('restore_request_object', type=validate_file_or_dict, help='Request body for operation "Restore" Expected value: ' 'json-string/@json-file. 
Required when --operation is Restore') - ## dataprotection enable-backup - with self.argument_context('dataprotection enable-backup') as c: - c.argument('datasource_uri', type=str, help="The URI of the datasource to be backed up.") - c.argument("backup_strategy", arg_type=get_enum_type(backup_presets), help="Backup strategy for the cluster. Defaults to Recommended.") - c.argument('configuration_parameters', type=validate_file_or_dict, help="Workload specific configuration overrides.") + ## Enable Backup command + with self.argument_context('dataprotection enable-backup trigger') as c: + c.argument('datasource_type', type=str, help="The type of datasource to be backed up. Supported values: AzureKubernetesService.") + c.argument('datasource_id', type=str, help="The full ARM resource ID of the datasource to be backed up.") + c.argument('backup_strategy', arg_type=get_enum_type(get_all_backup_strategies()), + help="Backup strategy preset. For AzureKubernetesService: Week (7-day retention), Month (30-day retention), " + "Immutable (7-day Op + 90-day Vault Tier), DisasterRecovery (GRS+CRR), Custom (bring your own vault/policy). " + "Default: Week.") + c.argument('configuration_settings', type=validate_file_or_dict, + help="Configuration settings file or JSON string. Expected value: json-string/@json-file. 
" + "Available settings: storage-account-id, blob-container-name, backup-resource-group-id, " + "backup-vault-id (required for Custom), backup-policy-id (required for Custom), tags.") with self.argument_context('dataprotection job show') as c: c.argument('resource_group_name', resource_group_name_type) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index 55d17a8438a..0ddc80d06dd 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -1,61 +1,430 @@ import json -from src.dataprotection.azext_dataprotection.manual.enums import CONST_RECOMMENDED +from azure.cli.core.azclierror import InvalidArgumentValueError from azure.cli.core.commands.client_factory import get_mgmt_service_client +from azure.mgmt.core.tools import parse_resource_id -def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): - print("datasourceId: " + datasource_id) - print("backupStrategy: " + backup_strategy) - print ("configurationParams: " + json.dumps(configuration_params)) - cluster_subscription_id = datasource_id.split('/')[2] - cluster_resource_group_name = datasource_id.split('/')[4] - cluster_name = datasource_id.split('/')[8] +# Tag used to identify storage accounts created for AKS backup +# Format: AKSAzureBackup: +AKS_BACKUP_TAG_KEY = "AKSAzureBackup" - from azure.cli.command_modules.role.custom import create_role_assignment - from azure.mgmt.resource import ResourceManagementClient - resource_client = get_mgmt_service_client(cmd.cli_ctx, ResourceManagementClient, subscription_id=cluster_subscription_id) +def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", max_retries=3, retry_delay=10): + """ + Check if a role assignment already exists, and create it if not. 
+ + Args: + cmd: CLI command context + role: Role name (e.g., 'Contributor', 'Reader', 'Storage Blob Data Contributor') + assignee: Principal ID of the identity to assign the role to + scope: Resource ID scope for the role assignment + identity_name: Friendly name of the identity for error messages + max_retries: Max retries for transient failures (like identity not propagated yet) + retry_delay: Delay in seconds between retries + + Returns: + True if role was assigned (new or existing), raises on failure + """ + import time + from azure.cli.command_modules.role.custom import list_role_assignments, create_role_assignment + from azure.core.exceptions import HttpResponseError + + # Check if role assignment already exists + try: + existing_assignments = list_role_assignments( + cmd, + assignee=assignee, + role=role, + scope=scope, + include_inherited=True + ) + + if existing_assignments: + print(f" Role '{role}' already assigned to {identity_name}") + return True + except Exception: + # If we can't list, we'll try to create and handle any errors there + pass + + # Try to create the role assignment with retries for transient failures + last_error = None + for attempt in range(max_retries): + try: + create_role_assignment( + cmd, + role=role, + assignee=assignee, + scope=scope + ) + print(f" Role '{role}' assigned to {identity_name}") + return True + except (HttpResponseError, Exception) as e: + error_message = str(e) + last_error = error_message + + # Check if this is a "already exists" conflict (409) + if "already exists" in error_message.lower() or "conflict" in error_message.lower(): + print(f" Role '{role}' already assigned to {identity_name}") + return True + + # Check if this is a permission/authorization error (not retryable) + if "authorization" in error_message.lower() or "forbidden" in error_message.lower() or "permission" in error_message.lower(): + raise InvalidArgumentValueError( + f"Failed to assign '{role}' role to {identity_name}.\n" + f"You don't have 
sufficient permissions to create role assignments.\n\n" + f"Please ask your administrator to run the following command:\n\n" + f" az role assignment create --role \"{role}\" --assignee \"{assignee}\" --scope \"{scope}\"\n\n" + f"After the role is assigned, re-run this command." + ) + + # Check if this is a "principal not found" error (retryable - identity propagation) + if "cannot find" in error_message.lower() or "does not exist" in error_message.lower() or "principal" in error_message.lower(): + if attempt < max_retries - 1: + print(f" Waiting for identity to propagate... (attempt {attempt + 1}/{max_retries})") + time.sleep(retry_delay) + continue + + # For other errors, don't retry + break + + # If we get here, we've exhausted retries or hit a non-retryable error + raise InvalidArgumentValueError( + f"Failed to assign '{role}' role to {identity_name}.\n" + f"Error: {last_error}\n\n" + f"You can try to manually assign the role using:\n\n" + f" az role assignment create --role \"{role}\" --assignee \"{assignee}\" --scope \"{scope}\"\n\n" + f"After the role is assigned, re-run this command." + ) + + +def __validate_request(datasource_id, backup_strategy, configuration_params): + """ + Validate the request parameters. Raises InvalidArgumentValueError on validation failure. 
+ + Args: + datasource_id: Full ARM resource ID of the AKS cluster + backup_strategy: Backup strategy (Week, Month, Immutable, DisasterRecovery, Custom) + configuration_params: Dict with configuration settings + """ + # Ensure configuration_params is a dict + if configuration_params is None: + configuration_params = {} + + # Parse if string + if isinstance(configuration_params, str): + try: + json.loads(configuration_params) + except json.JSONDecodeError: + raise InvalidArgumentValueError("Invalid JSON in configuration-settings") + + # Validate Custom strategy requirements + if backup_strategy == 'Custom': + if not configuration_params.get("backup-vault-id"): + raise InvalidArgumentValueError( + "backup-vault-id is required in --configuration-settings when using Custom strategy" + ) + if not configuration_params.get("backup-policy-id"): + raise InvalidArgumentValueError( + "backup-policy-id is required in --configuration-settings when using Custom strategy" + ) + + # Parse cluster subscription for validation + cluster_id_parts = parse_resource_id(datasource_id) + cluster_subscription_id = cluster_id_parts['subscription'] + + # Validate provided resource IDs are in the same subscription as cluster + backup_resource_group_id = configuration_params.get("backup-resource-group-id") + if backup_resource_group_id: + rg_parts = parse_resource_id(backup_resource_group_id) + if rg_parts['subscription'].lower() != cluster_subscription_id.lower(): + raise InvalidArgumentValueError( + f"backup-resource-group-id must be in the same subscription as the cluster. 
" + f"Cluster subscription: {cluster_subscription_id}, Resource group subscription: {rg_parts['subscription']}" + ) + + storage_account_id = configuration_params.get("storage-account-id") + if storage_account_id: + sa_parts = parse_resource_id(storage_account_id) + if sa_parts['subscription'].lower() != cluster_subscription_id.lower(): + raise InvalidArgumentValueError( + f"storage-account-id must be in the same subscription as the cluster. " + f"Cluster subscription: {cluster_subscription_id}, Storage account subscription: {sa_parts['subscription']}" + ) + + backup_vault_id = configuration_params.get("backup-vault-id") + if backup_vault_id: + vault_parts = parse_resource_id(backup_vault_id) + if vault_parts['subscription'].lower() != cluster_subscription_id.lower(): + raise InvalidArgumentValueError( + f"backup-vault-id must be in the same subscription as the cluster. " + f"Cluster subscription: {cluster_subscription_id}, Backup vault subscription: {vault_parts['subscription']}" + ) + + +def __check_existing_backup_instance(resource_client, datasource_id, cluster_name): + """ + Check if a backup instance already exists for this cluster using extension routing. 
+ + Calls: GET {datasource_id}/providers/Microsoft.DataProtection/backupInstances + + Returns: + None if no backup instance exists, raises error with details if one exists + """ + print(f" Checking for existing backup configuration...") + + try: + # Use extension routing to query backup instances on the cluster + extension_resource_id = f"{datasource_id}/providers/Microsoft.DataProtection/backupInstances" + response = resource_client.resources.get_by_id( + extension_resource_id, + api_version="2024-04-01" + ) + + # Parse the response to get backup instances list + bi_list = [] + if hasattr(response, 'value'): + bi_list = response.value if response.value else [] + elif hasattr(response, 'additional_properties'): + props = response.additional_properties + if isinstance(props, dict) and 'value' in props: + bi_list = props['value'] if props['value'] else [] + + # If list is empty, no backup instance exists + if not bi_list: + print(f" No existing backup instance found") + return None + + # Get details of the first backup instance + bi = bi_list[0] if isinstance(bi_list, list) else bi_list + bi_id = bi.get('id', 'Unknown') if isinstance(bi, dict) else getattr(bi, 'id', 'Unknown') + bi_name = bi.get('name', 'Unknown') if isinstance(bi, dict) else getattr(bi, 'name', 'Unknown') + + # Get protection status from properties + bi_properties = bi.get('properties', {}) if isinstance(bi, dict) else getattr(bi, 'properties', {}) + if isinstance(bi_properties, dict): + protection_status = bi_properties.get('currentProtectionState', 'Unknown') + protection_error = bi_properties.get('protectionErrorDetails', None) + else: + protection_status = getattr(bi_properties, 'current_protection_state', 'Unknown') + protection_error = getattr(bi_properties, 'protection_error_details', None) + + # Parse vault info from the BI resource ID + # Format: /subscriptions/.../resourceGroups/.../providers/Microsoft.DataProtection/backupVaults/{vault}/backupInstances/{bi} + vault_name = "Unknown" + 
vault_rg = "Unknown" + if bi_id and '/backupVaults/' in str(bi_id): + bi_parts = parse_resource_id(bi_id) + vault_name = bi_parts.get('name', 'Unknown') + vault_rg = bi_parts.get('resource_group', 'Unknown') + + print(f" Found existing backup instance!") + print(f" - Backup Instance: {bi_name}") + print(f" - Backup Vault: {vault_name}") + print(f" - Resource Group: {vault_rg}") + print(f" - Protection State: {protection_status}") + + error_info = "" + if protection_error: + error_msg = protection_error.get('message', str(protection_error)) if isinstance(protection_error, dict) else str(protection_error) + print(f" - Error Details: {error_msg[:100]}..." if len(str(error_msg)) > 100 else f" - Error Details: {error_msg}") + error_info = f"\n Protection Error: {error_msg}\n" + + raise InvalidArgumentValueError( + f"Cluster '{cluster_name}' is already protected by a backup instance.\n\n" + f"Existing Backup Configuration:\n" + f" Backup Instance: {bi_name}\n" + f" Backup Vault: {vault_name}\n" + f" Resource Group: {vault_rg}\n" + f" Protection State: {protection_status}{error_info}\n" + f"To reconfigure backup, first delete the existing backup instance:\n\n" + f" az dataprotection backup-instance delete \\\n" + f" --name \"{bi_name}\" \\\n" + f" --vault-name \"{vault_name}\" \\\n" + f" --resource-group \"{vault_rg}\" \\\n" + f" --yes\n\n" + f"Then re-run this command." 
+ ) + + except InvalidArgumentValueError: + # Re-raise our own error + raise + except Exception as e: + # 404 or other errors mean no backup instance exists - that's fine + error_str = str(e).lower() + if "not found" in error_str or "404" in error_str or "does not exist" in error_str: + print(f" No existing backup instance found") + return None + # For other errors, log and continue (don't block on extension routing failures) + print(f" Could not check for existing backup (will proceed): {str(e)[:100]}") + return None + + print(f" No existing backup instance found") + return None + + +def __validate_cluster(resource_client, datasource_id, cluster_name): + """Validate the AKS cluster exists and get its details.""" cluster_resource = resource_client.resources.get_by_id(datasource_id, api_version="2024-08-01") cluster_location = cluster_resource.location + print(f" Cluster: {cluster_name}") + print(f" Location: {cluster_location}") + print(f" [OK] Cluster validated") + return cluster_resource, cluster_location + + +def __find_existing_backup_resource_group(resource_client, cluster_location): + """ + Search for an existing AKS backup resource group in the subscription by tag. 
- """ - - Create backup vault and policy in the cluster resource group* - - Create backup resource group - - Create backup storage account and container - - Create backup extension - - Create trusted access role binding - - Assign all permissions - - Create backup instance + Looks for resource groups with tag: AKSAzureBackup = + + Returns: + resource_group if found, None otherwise """ + try: + # List all resource groups in the subscription + for rg in resource_client.resource_groups.list(): + if rg.tags: + # Check if this RG has the AKS backup tag matching the location + tag_value = rg.tags.get(AKS_BACKUP_TAG_KEY) + if tag_value and tag_value.lower() == cluster_location.lower(): + return rg + except Exception: + # If we can't list resource groups, we'll create a new one + pass + return None + + +def __setup_resource_group(cmd, resource_client, backup_resource_group_id, cluster_location, cluster_name, cluster_identity_principal_id, resource_tags): + """Create or use backup resource group.""" + if backup_resource_group_id: + backup_resource_group_name = parse_resource_id(backup_resource_group_id)['resource_group'] + print(f" Using provided resource group: {backup_resource_group_name}") + try: + backup_resource_group = resource_client.resource_groups.get(backup_resource_group_name) + except Exception: + raise InvalidArgumentValueError( + f"Resource group '{backup_resource_group_name}' not found. " + f"Please ensure the resource group exists or remove 'backup-resource-group-id' from configuration to create one automatically." 
+ ) + else: + # Search for existing backup resource group with matching tag + print(f" Searching for existing AKS backup resource group in region {cluster_location}...") + backup_resource_group = __find_existing_backup_resource_group(resource_client, cluster_location) + + if backup_resource_group: + # Found existing resource group - reuse it + backup_resource_group_name = backup_resource_group.name + print(f" Found existing backup resource group: {backup_resource_group_name}") + else: + # Create new resource group with AKS backup tag + backup_resource_group_name = __generate_backup_resource_group_name(cluster_location) + print(f" Creating resource group: {backup_resource_group_name}") + + # Build tags - include AKS backup tag plus any user-provided tags + rg_tags = {AKS_BACKUP_TAG_KEY: cluster_location} + if resource_tags: + rg_tags.update(resource_tags) + + rg_params = {"location": cluster_location, "tags": rg_tags} + backup_resource_group = resource_client.resource_groups.create_or_update(backup_resource_group_name, rg_params) - backup_resource_group_name = __generate_backup_resource_group_name(cluster_location, cluster_name) - print(f"Creating backup resource group ({backup_resource_group_name}) ...") - backup_resource_group = resource_client.resource_groups.create_or_update(backup_resource_group_name, {"location": cluster_location}) - print(f"Assigning 'Contributor' role to the cluster identity on the backup resource group ({backup_resource_group_name}) ...") - create_role_assignment( + print(f" Resource Group: {backup_resource_group.id}") + __check_and_assign_role( cmd, role="Contributor", - assignee=cluster_resource.identity.principal_id, - scope=backup_resource_group.id) + assignee=cluster_identity_principal_id, + scope=backup_resource_group.id, + identity_name="cluster identity") + print(f" [OK] Resource group ready") + + return backup_resource_group, backup_resource_group_name + +def __find_existing_backup_storage_account(storage_client, cluster_location): 
+ """ + Search for an existing AKS backup storage account in the subscription by tag. + + Looks for storage accounts with tag: AKSAzureBackup = + + Returns: + tuple: (storage_account, resource_group_name) if found, (None, None) otherwise + """ + try: + # List all storage accounts in the subscription + for sa in storage_client.storage_accounts.list(): + if sa.tags: + # Check if this SA has the AKS backup tag matching the location + tag_value = sa.tags.get(AKS_BACKUP_TAG_KEY) + if tag_value and tag_value.lower() == cluster_location.lower(): + # Parse resource group from the SA id + sa_parts = parse_resource_id(sa.id) + return sa, sa_parts['resource_group'] + except Exception: + # If we can't list storage accounts, we'll create a new one + pass + return None, None + + +def __setup_storage_account(cmd, cluster_subscription_id, storage_account_id, blob_container_name, backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags): + """Create or use storage account.""" from azure.mgmt.storage import StorageManagementClient + storage_client = get_mgmt_service_client(cmd.cli_ctx, StorageManagementClient, subscription_id=cluster_subscription_id) - backup_storage_account_name = __generate_backup_storage_account_name(cluster_location) - print(f"Creating storage account ({backup_storage_account_name}) in the backup resource group ({backup_resource_group_name}) ...") - backup_storage_account = storage_client.storage_accounts.begin_create( - resource_group_name=backup_resource_group_name, - account_name=backup_storage_account_name, - parameters={ - "location": cluster_location, - "kind": "StorageV2", - "sku": {"name": "Standard_LRS"}, - "allow_blob_public_access": False - }).result() - - backup_storage_account_container_name = __generate_backup_storage_account_container_name(cluster_name) - print(f"Creating blob container ({backup_storage_account_container_name}) in the backup storage account ({backup_storage_account_name}) ...") - 
storage_client.blob_containers.create(backup_resource_group_name, backup_storage_account_name, backup_storage_account_container_name, {}) - + storage_account_rg = backup_resource_group_name # Default to backup RG + + if storage_account_id: + # Use provided storage account + sa_parts = parse_resource_id(storage_account_id) + backup_storage_account_name = sa_parts['name'] + storage_account_rg = sa_parts['resource_group'] + print(f" Using provided storage account: {backup_storage_account_name}") + backup_storage_account = storage_client.storage_accounts.get_properties(storage_account_rg, backup_storage_account_name) + backup_storage_account_container_name = blob_container_name if blob_container_name else __generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) + else: + # Search for existing backup storage account with matching tag + print(f" Searching for existing AKS backup storage account in region {cluster_location}...") + backup_storage_account, existing_rg = __find_existing_backup_storage_account(storage_client, cluster_location) + + if backup_storage_account: + # Found existing storage account - reuse it + backup_storage_account_name = backup_storage_account.name + storage_account_rg = existing_rg + print(f" Found existing backup storage account: {backup_storage_account_name}") + else: + # Create new storage account with AKS backup tag + backup_storage_account_name = __generate_backup_storage_account_name(cluster_location) + print(f" Creating storage account: {backup_storage_account_name}") + + # Build tags - include AKS backup tag plus any user-provided tags + sa_tags = {AKS_BACKUP_TAG_KEY: cluster_location} + if resource_tags: + sa_tags.update(resource_tags) + + storage_params = { + "location": cluster_location, + "kind": "StorageV2", + "sku": {"name": "Standard_LRS"}, + "allow_blob_public_access": False, + "tags": sa_tags + } + backup_storage_account = storage_client.storage_accounts.begin_create( + 
resource_group_name=backup_resource_group_name, + account_name=backup_storage_account_name, + parameters=storage_params).result() + + backup_storage_account_container_name = __generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) + + print(f" Storage Account: {backup_storage_account.id}") + print(f" Creating blob container: {backup_storage_account_container_name}") + storage_client.blob_containers.create(storage_account_rg, backup_storage_account_name, backup_storage_account_container_name, {}) + print(f" [OK] Storage account ready") + + return backup_storage_account, backup_storage_account_name, backup_storage_account_container_name + + +def __install_backup_extension(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_storage_account_name, backup_storage_account_container_name, backup_resource_group_name, backup_storage_account): + """Install backup extension on the cluster.""" backup_extension = __create_backup_extension( cmd, cluster_subscription_id, @@ -66,196 +435,599 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy backup_resource_group_name, cluster_subscription_id) - print(f"Assigning 'Storage Blob Data Contributor' role to the extension identity on the backup storage account ({backup_storage_account_name}) ...") - create_role_assignment( + __check_and_assign_role( cmd, role="Storage Blob Data Contributor", assignee=backup_extension.aks_assigned_identity.principal_id, - scope=backup_storage_account.id) + scope=backup_storage_account.id, + identity_name="backup extension identity") + print(f" [OK] Backup extension ready") + + return backup_extension + + +def __find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location): + """ + Search for an existing AKS backup vault in the subscription by tag. 
+ + Looks for backup vaults with tag: AKSAzureBackup = + + Returns: + backup_vault if found, None otherwise + """ + from azext_dataprotection.aaz.latest.dataprotection.backup_vault import List as _BackupVaultList + + try: + # List all backup vaults in the subscription + vaults = _BackupVaultList(cli_ctx=cmd.cli_ctx)(command_args={}) + + for vault in vaults: + if vault.get('tags'): + # Check if this vault has the AKS backup tag matching the location + tag_value = vault['tags'].get(AKS_BACKUP_TAG_KEY) + if tag_value and tag_value.lower() == cluster_location.lower(): + return vault + except Exception: + # If we can't list vaults, we'll create a new one + pass + return None + +def __setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, cluster_resource, backup_resource_group, resource_tags): + """Create or use backup vault.""" from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Create as _BackupVaultCreate - backup_vault_name = __generate_backup_vault_name(cluster_location) - print(f"Creating backup vault ({backup_vault_name}) in the cluster resource group ({cluster_resource_group_name}) ...") - backup_vault = _BackupVaultCreate(cli_ctx=cmd.cli_ctx)(command_args={ - "vault_name": backup_vault_name, - "resource_group": cluster_resource_group_name, - "type": "SystemAssigned", - "storage_setting": [{'type': 'LocallyRedundant', 'datastore-type': 'VaultStore'}] - }).result() + + if backup_strategy == 'Custom' and backup_vault_id: + # Use provided vault for Custom strategy + vault_parts = parse_resource_id(backup_vault_id) + backup_vault_name = vault_parts['name'] + vault_rg = vault_parts['resource_group'] + print(f" Using provided backup vault: {backup_vault_name}") + from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Show as _BackupVaultShow + backup_vault = _BackupVaultShow(cli_ctx=cmd.cli_ctx)(command_args={ + "vault_name": backup_vault_name, + 
"resource_group": vault_rg + }) + else: + # Search for existing backup vault with matching tag + print(f" Searching for existing AKS backup vault in region {cluster_location}...") + backup_vault = __find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location) + + if backup_vault: + # Found existing vault - reuse it + backup_vault_name = backup_vault['name'] + print(f" Found existing backup vault: {backup_vault_name}") + else: + # Create new backup vault with AKS backup tag + backup_vault_name = __generate_backup_vault_name(cluster_location) + print(f" Creating backup vault: {backup_vault_name}") + + # Build tags - include AKS backup tag plus any user-provided tags + vault_tags = {AKS_BACKUP_TAG_KEY: cluster_location} + if resource_tags: + vault_tags.update(resource_tags) + + backup_vault_args = { + "vault_name": backup_vault_name, + "resource_group": backup_resource_group_name, + "location": cluster_location, + "type": "SystemAssigned", + "storage_setting": [{'type': 'LocallyRedundant', 'datastore-type': 'VaultStore'}], + "tags": vault_tags + } + backup_vault = _BackupVaultCreate(cli_ctx=cmd.cli_ctx)(command_args=backup_vault_args).result() - print(f"Assigning 'Reader' role to the backup vault identity on the cluster ({cluster_name}) ...") - create_role_assignment( + print(f" Backup Vault: {backup_vault['id']}") + __check_and_assign_role( cmd, role="Reader", assignee=backup_vault["identity"]["principalId"], - scope=cluster_resource.id) + scope=cluster_resource.id, + identity_name="backup vault identity (on cluster)") - print(f"Assigning 'Reader' role to the backup vault identity on the backup resource group ({backup_resource_group_name}) ...") - create_role_assignment( + __check_and_assign_role( cmd, role="Reader", assignee=backup_vault["identity"]["principalId"], - scope=backup_resource_group.id) + scope=backup_resource_group.id, + identity_name="backup vault identity (on resource group)") + print(f" [OK] Backup vault ready") + + return backup_vault, 
backup_vault_name + + +def __setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy_id): + """Create or use backup policy.""" + from azext_dataprotection.manual.aaz_operations.backup_policy import Create as _BackupPolicyCreate + from azext_dataprotection.aaz.latest.dataprotection.backup_policy import List as _BackupPolicyList + + # Create or use backup policy + if backup_strategy == 'Custom' and backup_policy_id: + # Use provided policy for Custom strategy + backup_policy_name = parse_resource_id(backup_policy_id)['name'] + print(f" Using provided backup policy: {backup_policy_name}") + backup_policy = {"id": backup_policy_id} + else: + # Get vault RG - for custom with provided vault, use vault's RG + vault_rg_for_policy = backup_resource_group_name + if backup_strategy == 'Custom' and backup_vault_id: + vault_rg_for_policy = parse_resource_id(backup_vault_id)['resource_group'] + + # Check if policy already exists in this vault + backup_policy_name = __generate_backup_policy_name(backup_strategy) + existing_policy = None + try: + policies = _BackupPolicyList(cli_ctx=cmd.cli_ctx)(command_args={ + "resource_group": vault_rg_for_policy, + "vault_name": backup_vault_name + }) + for policy in policies: + if policy.get('name') == backup_policy_name: + existing_policy = policy + break + except Exception: + pass + + if existing_policy: + print(f" Found existing backup policy: {backup_policy_name}") + backup_policy = existing_policy + else: + # Create policy based on strategy + policy_config = __get_policy_config_for_strategy(backup_strategy) + print(f" Creating backup policy: {backup_policy_name}") + + backup_policy = _BackupPolicyCreate(cli_ctx=cmd.cli_ctx)(command_args={ + "backup_policy_name": backup_policy_name, + "resource_group": vault_rg_for_policy, + "vault_name": backup_vault_name, + "policy": policy_config + }) + + print(f" Backup Policy: {backup_policy.get('id', backup_policy_id if 
backup_policy_id else 'N/A')}") + print(f" [OK] Backup policy ready") - print(f"Setting up trusted access between the cluster ({cluster_name}) and the backup vault ({backup_vault_name}) ...") + return backup_policy + + +def __setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_vault): + """Setup trusted access role binding between backup vault and cluster.""" from azext_dataprotection.vendored_sdks.azure_mgmt_containerservice import ContainerServiceClient from azext_dataprotection.vendored_sdks.azure_mgmt_containerservice.v2024_07_01.models import TrustedAccessRoleBinding - + cluster_client = get_mgmt_service_client(cmd.cli_ctx, ContainerServiceClient, subscription_id=cluster_subscription_id) + vault_id = backup_vault["id"] + vault_name = backup_vault["name"] + + print(f" Configuring trusted access between:") + print(f" - Backup Vault: {vault_name}") + print(f" - AKS Cluster: {cluster_name}") + + # Check if trusted access binding already exists for this vault-cluster pair + print(f" Checking for existing trusted access binding...") + try: + existing_bindings = cluster_client.trusted_access_role_bindings.list( + resource_group_name=cluster_resource_group_name, + resource_name=cluster_name + ) + for binding in existing_bindings: + if binding.source_resource_id.lower() == vault_id.lower(): + print(f" Found existing binding: {binding.name}") + print(f" [OK] Trusted access already configured") + return + except Exception: + # If we can't list, we'll try to create + pass + + # Create new trusted access role binding with GUID-based name + binding_name = __generate_trusted_access_role_binding_name() + print(f" Creating trusted access role binding: {binding_name}") + print(f" Role: Microsoft.DataProtection/backupVaults/backup-operator") + _trusted_access_role_binding = TrustedAccessRoleBinding( - source_resource_id=backup_vault["id"], + source_resource_id=vault_id, 
roles=["Microsoft.DataProtection/backupVaults/backup-operator"]) cluster_client.trusted_access_role_bindings.begin_create_or_update( resource_group_name=cluster_resource_group_name, resource_name=cluster_name, - trusted_access_role_binding_name=__generate_trusted_access_role_binding_name(backup_vault_name), + trusted_access_role_binding_name=binding_name, trusted_access_role_binding=_trusted_access_role_binding).result() + print(f" [OK] Trusted access configured - vault can now access cluster for backup operations") - print(f"Creating backup policy in the backup vault ({backup_vault_name}) ...") - from azext_dataprotection.manual.aaz_operations.backup_policy import Create as _BackupPolicyCreate - backup_policy_name = __generate_backup_policy_name() - backup_policy = _BackupPolicyCreate(cli_ctx=cmd.cli_ctx)(command_args={ - "backup_policy_name": backup_policy_name, - "resource_group": cluster_resource_group_name, - "vault_name": backup_vault_name, - "policy": { - "objectType": "BackupPolicy", - "datasourceTypes": [ - "Microsoft.ContainerService/managedClusters" - ], - "policyRules": [ - { - "isDefault": True, - "lifecycles": [ - { - "deleteAfter": { - "duration": "P1D", - "objectType": "AbsoluteDeleteOption" - }, - "sourceDataStore": { - "dataStoreType": "OperationalStore", - "objectType": "DataStoreInfoBase" - }, - "targetDataStoreCopySettings": [] - } - ], - "name": "Default", - "objectType": "AzureRetentionRule" - }, - { - "backupParameters": { - "backupType": "Incremental", - "objectType": "AzureBackupParams" - }, - "dataStore": { - "dataStoreType": "OperationalStore", - "objectType": "DataStoreInfoBase" - }, - "name": "BackupHourly", - "objectType": "AzureBackupRule", - "trigger": { - "objectType": "ScheduleBasedTriggerContext", - "schedule": { - "repeatingTimeIntervals": [ - "R/2024-01-01T00:00:00+00:00/PT6H" - ], - "timeZone": "Coordinated Universal Time" - }, - "taggingCriteria": [ - { - "isDefault": True, - "tagInfo": { - "id": "Default_", - "tagName": 
"Default" - }, - "taggingPriority": 99 - } - ] - } - } - ] - } - }) - print(f"Running final validation and configuring backup for the cluster ({cluster_name}) ...") +def __create_backup_instance(cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group): + """Create backup instance.""" from azext_dataprotection.manual.aaz_operations.backup_instance import ValidateAndCreate as _BackupInstanceValidateAndCreate - import uuid - backup_instance_name = f"{cluster_name}-{uuid.uuid4()}" + + backup_instance_name = f"{cluster_name}-{str(uuid.uuid4())[:8]}" + + # Get vault RG for backup instance - use backup RG unless custom vault provided + vault_rg_for_bi = backup_resource_group_name + if backup_strategy == 'Custom' and backup_vault_id: + vault_rg_for_bi = parse_resource_id(backup_vault_id)['resource_group'] + + # Get policy ID + policy_id_for_bi = backup_policy.get("id") if isinstance(backup_policy, dict) else backup_policy_id + + print(f" Creating backup instance: {backup_instance_name}") + backup_instance_payload = __get_backup_instance_payload( + backup_instance_name=backup_instance_name, + cluster_name=cluster_name, + datasource_id=datasource_id, + cluster_location=cluster_location, + policy_id=policy_id_for_bi, + backup_resource_group_id=backup_resource_group.id + ) + backup_instance = _BackupInstanceValidateAndCreate(cli_ctx=cmd.cli_ctx)(command_args={ "backup_instance_name": backup_instance_name, - "resource_group": cluster_resource_group_name, + "resource_group": vault_rg_for_bi, "vault_name": backup_vault_name, - "backup_instance": { - "backup_instance_name": backup_instance_name, - "properties": { - "friendly_name": f"{cluster_name}\\fullbackup", - "object_type": "BackupInstance", - "data_source_info": { - "datasource_type": "Microsoft.ContainerService/managedClusters", - "object_type": "Datasource", - 
"resource_id": datasource_id, - "resource_location": cluster_location, - "resource_name": cluster_name, - "resource_type": "Microsoft.ContainerService/managedclusters", - "resource_uri": datasource_id + "backup_instance": backup_instance_payload + }).result() + + # Check and report the protection state + protection_state = backup_instance.get('properties', {}).get('currentProtectionState', 'Unknown') + print(f" Protection State: {protection_state}") + + if protection_state == "ProtectionConfigured": + print(f" [OK] Backup instance created and protection configured") + elif protection_state == "ConfiguringProtection": + print(f" [OK] Backup instance created - protection configuration in progress") + elif protection_state == "ProtectionError": + error_details = backup_instance.get('properties', {}).get('protectionErrorDetails', {}) + error_msg = error_details.get('message', 'Unknown error') if isinstance(error_details, dict) else str(error_details) + print(f" [WARNING] Backup instance created but protection has errors: {error_msg}") + else: + print(f" [OK] Backup instance created") + + return backup_instance, policy_id_for_bi + + +def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy='Week', configuration_params=None): + """ + Enable backup for an AKS cluster. 
+ + Args: + cmd: CLI command context + datasource_id: Full ARM resource ID of the AKS cluster + backup_strategy: Backup strategy (Week, Month, Immutable, DisasterRecovery, Custom) + configuration_params: Dict with configuration settings + """ + print("=" * 60) + print("Enabling backup for AKS cluster") + print("=" * 60) + print(f"Datasource ID: {datasource_id}") + print(f"Backup Strategy: {backup_strategy}") + + # Parse configuration_params + if configuration_params is None: + configuration_params = {} + if isinstance(configuration_params, str): + configuration_params = json.loads(configuration_params) + + # Validate request (raises on failure) + __validate_request(datasource_id, backup_strategy, configuration_params) + + # Extract configuration values + resource_tags = configuration_params.get("tags") + storage_account_id = configuration_params.get("storage-account-id") + blob_container_name = configuration_params.get("blob-container-name") + backup_resource_group_id = configuration_params.get("backup-resource-group-id") + backup_vault_id = configuration_params.get("backup-vault-id") + backup_policy_id = configuration_params.get("backup-policy-id") + + # Parse cluster details from resource ID + cluster_id_parts = parse_resource_id(datasource_id) + cluster_subscription_id = cluster_id_parts['subscription'] + cluster_resource_group_name = cluster_id_parts['resource_group'] + cluster_name = cluster_id_parts['name'] + + if resource_tags: + print(f"Resource Tags: {json.dumps(resource_tags)}") + + from azure.mgmt.resource import ResourceManagementClient + resource_client = get_mgmt_service_client(cmd.cli_ctx, ResourceManagementClient, subscription_id=cluster_subscription_id) + + # Pre-check: Verify no existing backup instance for this cluster + print(f"\n[Pre-check] Checking for existing backup...") + __check_existing_backup_instance(resource_client, datasource_id, cluster_name) + + # Step 1: Validate cluster + print(f"\n[1/8] Validating cluster...") + cluster_resource, 
cluster_location = __validate_cluster(resource_client, datasource_id, cluster_name) + + # Step 2: Setup resource group + print(f"\n[2/8] Setting up backup resource group...") + backup_resource_group, backup_resource_group_name = __setup_resource_group( + cmd, resource_client, backup_resource_group_id, cluster_location, cluster_name, + cluster_resource.identity.principal_id, resource_tags) + + # Step 3: Setup storage account + print(f"\n[3/8] Setting up storage account...") + backup_storage_account, backup_storage_account_name, backup_storage_account_container_name = __setup_storage_account( + cmd, cluster_subscription_id, storage_account_id, blob_container_name, + backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags) + + # Step 4: Install backup extension + print(f"\n[4/8] Installing backup extension...") + __install_backup_extension( + cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, + backup_storage_account_name, backup_storage_account_container_name, + backup_resource_group_name, backup_storage_account) + + # Step 5: Setup backup vault + print(f"\n[5/8] Setting up backup vault...") + backup_vault, backup_vault_name = __setup_backup_vault( + cmd, backup_strategy, backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, + cluster_resource, backup_resource_group, resource_tags) + + # Step 6: Setup backup policy + print(f"\n[6/8] Setting up backup policy...") + backup_policy = __setup_backup_policy( + cmd, backup_vault, backup_vault_name, backup_resource_group_name, + backup_strategy, backup_vault_id, backup_policy_id) + + # Step 7: Setup trusted access + print(f"\n[7/8] Setting up trusted access...") + __setup_trusted_access( + cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_vault) + + # Step 8: Create backup instance + print(f"\n[8/8] Configuring backup instance...") + backup_instance, policy_id_for_bi = __create_backup_instance( 
+ cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, + backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group) + + # Print summary + print("\n" + "=" * 60) + print("Backup enabled successfully!") + print("=" * 60) + print("\nBackup Configuration:") + print(f" * Resource Group: {backup_resource_group.id}") + print(f" * Storage Account: {backup_storage_account.id}") + print(f" * Backup Vault: {backup_vault['id']}") + print(f" * Backup Policy: {policy_id_for_bi}") + print(f" * Backup Instance: {backup_instance.get('id', 'N/A')}") + print("=" * 60) + + +def __get_policy_config_for_strategy(backup_strategy): + """Get backup policy configuration based on strategy.""" + # Default retention based on strategy + retention_duration = "P7D" # Week default + if backup_strategy == 'Month': + retention_duration = "P30D" + elif backup_strategy == 'Immutable': + retention_duration = "P7D" # Op tier, vault tier handled separately + elif backup_strategy == 'DisasterRecovery': + retention_duration = "P7D" # Op tier, vault tier handled separately + + return { + "objectType": "BackupPolicy", + "datasourceTypes": [ + "Microsoft.ContainerService/managedClusters" + ], + "policyRules": [ + { + "isDefault": True, + "lifecycles": [ + { + "deleteAfter": { + "duration": retention_duration, + "objectType": "AbsoluteDeleteOption" + }, + "sourceDataStore": { + "dataStoreType": "OperationalStore", + "objectType": "DataStoreInfoBase" + }, + "targetDataStoreCopySettings": [] + } + ], + "name": "Default", + "objectType": "AzureRetentionRule" + }, + { + "backupParameters": { + "backupType": "Incremental", + "objectType": "AzureBackupParams" }, - "data_source_set_info": { - "datasource_type": "Microsoft.ContainerService/managedClusters", - "object_type": "DatasourceSet", - "resource_id": datasource_id, - "resource_location": cluster_location, - "resource_name": cluster_name, - "resource_type": 
"Microsoft.ContainerService/managedclusters", - "resource_uri": datasource_id + "dataStore": { + "dataStoreType": "OperationalStore", + "objectType": "DataStoreInfoBase" }, - "policy_info": { - "policy_id": backup_policy["id"], - # "policy_id": "/subscriptions/f0c630e0-2995-4853-b056-0b3c09cb673f/resourceGroups/rg2eacanrraj/providers/Microsoft.DataProtection/backupVaults/hackvault/backupPolicies/def", - "policy_parameters": { - "backup_datasource_parameters_list": [ - { - "objectType": "KubernetesClusterBackupDatasourceParameters", - "include_cluster_scope_resources": True, - "snapshot_volumes": True - } + "name": "BackupDaily", + "objectType": "AzureBackupRule", + "trigger": { + "objectType": "ScheduleBasedTriggerContext", + "schedule": { + "repeatingTimeIntervals": [ + "R/2024-01-01T00:00:00+00:00/P1D" ], - "data_store_parameters_list": [ - { - "object_type": "AzureOperationalStoreParameters", - "data_store_type": "OperationalStore", - "resource_group_id": backup_resource_group.id - } - ] - } + "timeZone": "Coordinated Universal Time" + }, + "taggingCriteria": [ + { + "isDefault": True, + "tagInfo": { + "id": "Default_", + "tagName": "Default" + }, + "taggingPriority": 99 + } + ] + } + } + ] + } + + +def __get_backup_instance_payload(backup_instance_name, cluster_name, datasource_id, cluster_location, policy_id, backup_resource_group_id): + """Get backup instance payload for AKS cluster.""" + return { + "backup_instance_name": backup_instance_name, + "properties": { + "friendly_name": f"{cluster_name}\\fullbackup", + "object_type": "BackupInstance", + "data_source_info": { + "datasource_type": "Microsoft.ContainerService/managedClusters", + "object_type": "Datasource", + "resource_id": datasource_id, + "resource_location": cluster_location, + "resource_name": cluster_name, + "resource_type": "Microsoft.ContainerService/managedclusters", + "resource_uri": datasource_id + }, + "data_source_set_info": { + "datasource_type": 
"Microsoft.ContainerService/managedClusters", + "object_type": "DatasourceSet", + "resource_id": datasource_id, + "resource_location": cluster_location, + "resource_name": cluster_name, + "resource_type": "Microsoft.ContainerService/managedclusters", + "resource_uri": datasource_id + }, + "policy_info": { + "policy_id": policy_id, + "policy_parameters": { + "backup_datasource_parameters_list": [ + { + "objectType": "KubernetesClusterBackupDatasourceParameters", + "include_cluster_scope_resources": True, + "snapshot_volumes": True + } + ], + "data_store_parameters_list": [ + { + "object_type": "AzureOperationalStoreParameters", + "data_store_type": "OperationalStore", + "resource_group_id": backup_resource_group_id + } + ] } } } - }).result() + } - print(f"Kubernetes cluster ({cluster_name}) protected successfully.") def __generate_arm_id(subscription_id, resource_group_name, resource_type, resource_name): return f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/{resource_type}/{resource_name}" -def __generate_backup_resource_group_name(cluster_location, cluster_name): - return f"rg_azurebackup_{cluster_location}_{cluster_name}" + +def __generate_backup_resource_group_name(cluster_location): + """ + Generate backup resource group name (one per region, shared across clusters). + + Naming constraints: + - Length: 1-90 characters + - Allowed characters: alphanumerics, underscores, parentheses, hyphens, periods + - Cannot end with a period + + Format: AKSAzureBackup_ (one resource group per region) + Example: AKSAzureBackup_eastasia + """ + return f"AKSAzureBackup_{cluster_location}" + def __generate_backup_storage_account_name(cluster_location): - return f"kubernetesbackup{cluster_location}" + """ + Generate backup storage account name (one per region, shared across clusters). 
+ + Naming constraints: + - Length: 3-24 characters + - Allowed characters: lowercase letters and numbers only + - Must be globally unique + + Format: aksbkp (one storage account per region) + Example: aksbkpeastasia1a2b3c + """ + import uuid + # Remove any non-alphanumeric chars from location and make lowercase + sanitized_location = ''.join(c for c in cluster_location.lower() if c.isalnum()) + # Generate a short GUID suffix for uniqueness + guid_suffix = str(uuid.uuid4()).replace('-', '')[:6] + # Truncate location to fit: 24 chars max - 6 (aksbkp) - 6 (guid) = 12 chars for location + sanitized_location = sanitized_location[:12] + return f"aksbkp{sanitized_location}{guid_suffix}" + + +def __generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name): + """ + Generate backup blob container name (unique per cluster). + + Naming constraints: + - Length: 3-63 characters + - Allowed characters: lowercase letters, numbers, and hyphens + - Must start with a letter or number + - Cannot contain consecutive hyphens + + Format: - + Example: contoso-aks-hack-contoso-aks-rg + """ + import re + + def sanitize(name): + # Lowercase, replace invalid chars with hyphens + sanitized = re.sub(r'[^a-z0-9-]', '-', name.lower()) + # Remove consecutive hyphens + sanitized = re.sub(r'-+', '-', sanitized) + # Remove leading/trailing hyphens + return sanitized.strip('-') + + sanitized_cluster = sanitize(cluster_name) + sanitized_rg = sanitize(cluster_resource_group_name) + + # Combine and truncate to 63 chars max + container_name = f"{sanitized_cluster}-{sanitized_rg}" + return container_name[:63].rstrip('-') -def __generate_backup_storage_account_container_name(cluster_name): - return f"backup-{cluster_name}" def __generate_backup_vault_name(cluster_location): - return f"backupvault-{cluster_location}" + """ + Generate backup vault name (one per region, shared across clusters). 
+ + Naming constraints: + - Length: 2-50 characters + - Allowed characters: alphanumerics and hyphens + - Must start with a letter + - Cannot end with a hyphen + + Format: AKSAzureBackup- (one vault per region) + Example: AKSAzureBackup-eastasia + """ + return f"AKSAzureBackup-{cluster_location}" -def __generate_backup_policy_name(): - return f"defaultbackuppolicy" -def __generate_trusted_access_role_binding_name(backup_vault_name): - return f"backup-howtogetid" +def __generate_backup_policy_name(backup_strategy): + """ + Generate backup policy name (shared per strategy). + + Naming constraints: + - Length: 3-150 characters + - Allowed characters: alphanumerics and hyphens + + Format: AKSBackupPolicy- + """ + return f"AKSBackupPolicy-{backup_strategy}" + + +def __generate_trusted_access_role_binding_name(): + """ + Generate trusted access role binding name using GUID. + + Naming constraints: + - Length: 1-24 characters + - Allowed characters: alphanumerics, underscores, hyphens + + Format: tarb- + Example: tarb-a1b2c3d4e5f6g7h8 + """ + import uuid + # Generate GUID and take first 16 chars (without hyphens) + guid_suffix = str(uuid.uuid4()).replace('-', '')[:16] + # "tarb-" (5 chars) + guid (16 chars) = 21 chars + return f"tarb-{guid_suffix}" def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster_name, storage_account_name, storage_account_container_name, storage_account_resource_group, storage_account_subscription_id): from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration import SourceControlConfigurationClient @@ -270,10 +1042,28 @@ def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster for page in extensions.by_page(): for extension in page: if extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': - print(f"Data protection extension ({extension.name}) is already installed in the cluster ({cluster_name}).") - return extension + # Check extension provisioning state + 
provisioning_state = extension.provisioning_state + if provisioning_state == "Succeeded": + print(f" Data protection extension ({extension.name}) is already installed and healthy.") + return extension + elif provisioning_state == "Failed": + raise InvalidArgumentValueError( + f"Data protection extension '{extension.name}' exists on cluster '{cluster_name}' but is in Failed state.\n" + f"Please take corrective action before running this command again:\n" + f" 1. Check extension logs: az k8s-extension show --name {extension.name} --cluster-name {cluster_name} --resource-group {resource_group_name} --cluster-type managedClusters\n" + f" 2. Delete the failed extension: az k8s-extension delete --name {extension.name} --cluster-name {cluster_name} --resource-group {resource_group_name} --cluster-type managedClusters --yes\n" + f" 3. Re-run this command to install a fresh extension.\n" + f"For troubleshooting, visit: https://aka.ms/aksclusterbackup" + ) + else: + # Extension is in a transient state (Creating, Updating, Deleting, etc.) + raise InvalidArgumentValueError( + f"Data protection extension '{extension.name}' is in '{provisioning_state}' state.\n" + f"Please wait for the operation to complete and try again." 
+ ) - print(f"Installing data protection extension (azure-aks-backup) in the cluster ({cluster_name}) ...") + print(f" Installing data protection extension (azure-aks-backup)...") from azure.cli.core.extension.operations import add_extension_to_path from importlib import import_module @@ -281,7 +1071,7 @@ def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster K8s_extension_client_factory = import_module("azext_k8s_extension._client_factory") k8s_extension_module = import_module("azext_k8s_extension.custom") - return k8s_extension_module.create_k8s_extension( + extension = k8s_extension_module.create_k8s_extension( cmd=cmd, client=K8s_extension_client_factory.cf_k8s_extension_operation(cmd.cli_ctx), resource_group_name=resource_group_name, @@ -300,23 +1090,11 @@ def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster "storageAccountSubscriptionId": storage_account_subscription_id }] ).result() - - # - # Check if there is an SA in cluster RG with azure tag - clusterName = backup - # If not, create one with name bkp- (4) (4) sha256 of cluster URI - # - # - # - # P2 - Using Extension routing, if there is a BI already for the cluster. If there, is print the vault name where it resides. 
(this can be the very first step) - # - # - # Check if there is a backupvault in the subscription with tag, default=true - # - # If there is no such backup vault, create a resource group with tag backup-resource-group=true - # Create a backup vault in the resource group with tag default=true - # - # Check if the Vault has a policy with params matching Recommended Policy params - # - # - # - # + + # Verify extension is in healthy state after installation + if extension.provisioning_state == "Succeeded": + print(f" Extension installed and healthy (Provisioning State: Succeeded)") + else: + print(f" Warning: Extension provisioning state is '{extension.provisioning_state}'") + + return extension \ No newline at end of file diff --git a/src/dataprotection/azext_dataprotection/manual/commands.py b/src/dataprotection/azext_dataprotection/manual/commands.py index 8be71b51c75..2bc02116101 100644 --- a/src/dataprotection/azext_dataprotection/manual/commands.py +++ b/src/dataprotection/azext_dataprotection/manual/commands.py @@ -131,7 +131,5 @@ def load_command_table(self, _): g.custom_command('list', 'dataprotection_recovery_point_list') with self.command_group('dataprotection enable-backup', exception_handler=exception_handler) as g: - g.custom_command('initialize', 'dataprotection_enable_backup', supports_no_wait=False) - g.custom_command('fix', 'dataprotection_enable_backup', supports_no_wait=True) - g.custom_command('debug', 'dataprotection_enable_backup', supports_no_wait=True) + g.custom_command('trigger', 'dataprotection_enable_backup', supports_no_wait=True) \ No newline at end of file diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index d66a784f9fa..4f50c91a9b8 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -32,7 +32,6 @@ QueryRequest, QueryRequestOptions from azext_dataprotection.manual import 
backupcenter_helper, helpers as helper from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Show as BackupVaultGet -from src.dataprotection.azext_dataprotection.manual.enums import CONST_RECOMMENDED logger = get_logger(__name__) @@ -1152,12 +1151,75 @@ def restore_initialize_for_item_recovery(cmd, datasource_type, source_datastore, return restore_request -def dataprotection_enable_backup(cmd, datasource_uri: str, backup_strategy=CONST_RECOMMENDED, configuration_params=None): - - # if uri contains case insensitive Microsoft.ContainerService/managedClusters contains and add if check - if "Microsoft.ContainerService/managedClusters".lower() in datasource_uri.lower(): +def dataprotection_enable_backup(cmd, datasource_type, datasource_id, backup_strategy=None, configuration_settings=None): + """Enable backup for a datasource using a single command. + + This command orchestrates all the steps required to enable backup: + - Creates backup infrastructure (resource group, storage account, vault) + - Installs required extensions + - Configures backup instance with specified strategy + """ + from azext_dataprotection.manual.enums import get_backup_strategies_for_datasource + + # Supported datasource types + supported_datasource_types = ["AzureKubernetesService"] + + # Validate datasource type is supported + if datasource_type not in supported_datasource_types: + raise InvalidArgumentValueError( + f"Unsupported datasource type: {datasource_type}. 
" + f"Supported types: {', '.join(supported_datasource_types)}" + ) + + # Get valid strategies for this datasource type + valid_strategies = get_backup_strategies_for_datasource(datasource_type) + + # Set default strategy based on datasource type + if backup_strategy is None: + if datasource_type == "AzureKubernetesService": + backup_strategy = 'Week' + # Add defaults for other datasource types here as they are supported + + # Validate strategy for datasource type + if backup_strategy not in valid_strategies: + raise InvalidArgumentValueError( + f"Invalid backup-strategy '{backup_strategy}' for {datasource_type}. " + f"Allowed values: {', '.join(valid_strategies)}" + ) + + # Parse configuration settings if it's a string (from file) + config = _parse_configuration_settings(configuration_settings) + + # Route to datasource-specific handler + if datasource_type == "AzureKubernetesService": + if "Microsoft.ContainerService/managedClusters".lower() not in datasource_id.lower(): + raise InvalidArgumentValueError( + "datasource-id must be an AKS cluster resource ID for AzureKubernetesService datasource type" + ) + from azext_dataprotection.manual.aks.aks_helper import dataprotection_enable_backup_helper - dataprotection_enable_backup_helper(cmd, datasource_uri, backup_strategy, configuration_params) + dataprotection_enable_backup_helper(cmd, datasource_id, backup_strategy, config) return - else: - raise InvalidArgumentValueError("Unsupported datasource type for command") + + +def _parse_configuration_settings(configuration_settings): + """Parse configuration settings from file or dict into a dictionary.""" + import json + + if configuration_settings is None: + return {} + + # If it's already a dict, return as-is + if isinstance(configuration_settings, dict): + return configuration_settings + + # If it's a string, try to parse as JSON + if isinstance(configuration_settings, str): + try: + return json.loads(configuration_settings) + except json.JSONDecodeError: + raise 
InvalidArgumentValueError( + f"Invalid JSON in configuration-settings: '{configuration_settings}'" + ) + + return {} diff --git a/src/dataprotection/azext_dataprotection/manual/enums.py b/src/dataprotection/azext_dataprotection/manual/enums.py index ed2af569414..fd2291356f8 100644 --- a/src/dataprotection/azext_dataprotection/manual/enums.py +++ b/src/dataprotection/azext_dataprotection/manual/enums.py @@ -93,12 +93,28 @@ def get_persistent_volume_restore_mode_values(): def get_conflict_policy_values(): return ['Skip', 'Patch'] -CONST_RECOMMENDED = 'Recommended' -CONST_DEFAULT = 'Default' -CONST_DAILY = 'Daily' - -backup_presets = [ - CONST_RECOMMENDED, - CONST_DEFAULT, - CONST_DAILY -] \ No newline at end of file + +def get_aks_backup_strategies(): + from azext_dataprotection.manual._consts import CONST_AKS_BACKUP_STRATEGIES + return CONST_AKS_BACKUP_STRATEGIES + + +# Export backup_presets for use in aks-preview +backup_presets = get_aks_backup_strategies() + + +def get_all_backup_strategies(): + """Returns all backup strategies across all workload types.""" + all_strategies = set() + all_strategies.update(get_aks_backup_strategies()) + # Add other workload strategies here as they are supported + # all_strategies.update(get_postgres_backup_strategies()) + return list(all_strategies) + + +def get_backup_strategies_for_datasource(datasource_type): + """Returns valid backup strategies for a given datasource type.""" + strategies = { + "AzureKubernetesService": get_aks_backup_strategies(), + } + return strategies.get(datasource_type, []) \ No newline at end of file From e9adce52f4d3bb457c73300ff03c14f996ea7094 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 16 Feb 2026 12:02:38 +0530 Subject: [PATCH 07/24] [AKS] Update backup configuration parameters and improve argument handling Signed-off-by: Anshul Ahuja --- src/aks-preview/azext_aks_preview/_params.py | 6 +- .../managed_cluster_decorator.py | 4 +- .../azext_dataprotection/manual/_params.py | 9 +- 
.../manual/aks/aks_helper.py | 336 ++++++++++-------- .../azext_dataprotection/manual/custom.py | 34 +- 5 files changed, 230 insertions(+), 159 deletions(-) diff --git a/src/aks-preview/azext_aks_preview/_params.py b/src/aks-preview/azext_aks_preview/_params.py index 1e1c5f86d0b..9890ce47080 100644 --- a/src/aks-preview/azext_aks_preview/_params.py +++ b/src/aks-preview/azext_aks_preview/_params.py @@ -1757,8 +1757,10 @@ def load_arguments(self, _): ) ) c.argument("enable_backup", help="Enable backup for the cluster", is_preview=True, action="store_true") - c.argument("backup_strategy", arg_type=get_enum_type(backup_presets), help="Backup strategy for the cluster. Defaults to Recommended.", is_preview=True) - c.argument("backup_configuration_parameters", type=validate_file_or_dict, help="Backup configuration overrides.", is_preview=True) + c.argument("backup_strategy", arg_type=get_enum_type(backup_presets), help="Backup strategy for the cluster. Defaults to Week.", is_preview=True) + c.argument("backup_configuration_file", type=validate_file_or_dict, + options_list=['--backup-configuration-file', '-f'], + help="Path to backup configuration file (JSON) or inline JSON string.", is_preview=True) # In update scenario, use emtpy str as default. 
c.argument('ssh_access', arg_type=get_enum_type(ssh_accesses), is_preview=True) c.argument('enable_static_egress_gateway', is_preview=True, action='store_true') diff --git a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py index 6094fd7f527..e68b57657b5 100644 --- a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py +++ b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py @@ -7395,7 +7395,7 @@ def set_up_backup(self, mc: ManagedCluster) -> ManagedCluster: ) backup_strategy = self.context.raw_param.get("backup_strategy") - backup_configuration_parameters = self.context.raw_param.get("backup_configuration_parameters") + backup_configuration_file = self.context.raw_param.get("backup_configuration_file") # Build the cluster resource ID cluster_resource_id = ( @@ -7404,7 +7404,7 @@ def set_up_backup(self, mc: ManagedCluster) -> ManagedCluster: f"/providers/Microsoft.ContainerService/managedClusters/{self.context.get_name()}" ) - dataprotection_enable_backup_helper(self.cmd, str(cluster_resource_id), backup_strategy, backup_configuration_parameters) + dataprotection_enable_backup_helper(self.cmd, str(cluster_resource_id), backup_strategy, backup_configuration_file) return mc def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool: diff --git a/src/dataprotection/azext_dataprotection/manual/_params.py b/src/dataprotection/azext_dataprotection/manual/_params.py index 7945be7eadb..7eec015f616 100644 --- a/src/dataprotection/azext_dataprotection/manual/_params.py +++ b/src/dataprotection/azext_dataprotection/manual/_params.py @@ -191,10 +191,11 @@ def load_arguments(self, _): help="Backup strategy preset. For AzureKubernetesService: Week (7-day retention), Month (30-day retention), " "Immutable (7-day Op + 90-day Vault Tier), DisasterRecovery (GRS+CRR), Custom (bring your own vault/policy). 
" "Default: Week.") - c.argument('configuration_settings', type=validate_file_or_dict, - help="Configuration settings file or JSON string. Expected value: json-string/@json-file. " - "Available settings: storage-account-id, blob-container-name, backup-resource-group-id, " - "backup-vault-id (required for Custom), backup-policy-id (required for Custom), tags.") + c.argument('backup_configuration_file', type=validate_file_or_dict, + options_list=['--backup-configuration-file', '-f'], + help="Path to backup configuration file (JSON) or inline JSON string. " + "Available settings: storageAccountResourceId, blobContainerName, backupResourceGroupId, " + "backupVaultId (required for Custom), backupPolicyId (required for Custom), tags.") with self.argument_context('dataprotection job show') as c: c.argument('resource_group_name', resource_group_name_type) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index 0ddc80d06dd..54d68ef82f2 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -40,7 +40,7 @@ def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity" ) if existing_assignments: - print(f" Role '{role}' already assigned to {identity_name}") + print(f"\tRole '{role}' already assigned to {identity_name}") return True except Exception: # If we can't list, we'll try to create and handle any errors there @@ -56,7 +56,7 @@ def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity" assignee=assignee, scope=scope ) - print(f" Role '{role}' assigned to {identity_name}") + print(f"\tRole '{role}' assigned to {identity_name}") return True except (HttpResponseError, Exception) as e: error_message = str(e) @@ -64,7 +64,7 @@ def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity" # Check if this is a "already exists" 
conflict (409) if "already exists" in error_message.lower() or "conflict" in error_message.lower(): - print(f" Role '{role}' already assigned to {identity_name}") + print(f"\tRole '{role}' already assigned to {identity_name}") return True # Check if this is a permission/authorization error (not retryable) @@ -80,7 +80,7 @@ def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity" # Check if this is a "principal not found" error (retryable - identity propagation) if "cannot find" in error_message.lower() or "does not exist" in error_message.lower() or "principal" in error_message.lower(): if attempt < max_retries - 1: - print(f" Waiting for identity to propagate... (attempt {attempt + 1}/{max_retries})") + print(f"\tWaiting for identity to propagate... (attempt {attempt + 1}/{max_retries})") time.sleep(retry_delay) continue @@ -104,7 +104,13 @@ def __validate_request(datasource_id, backup_strategy, configuration_params): Args: datasource_id: Full ARM resource ID of the AKS cluster backup_strategy: Backup strategy (Week, Month, Immutable, DisasterRecovery, Custom) - configuration_params: Dict with configuration settings + configuration_params: Dict with configuration settings (camelCase keys) + - storageAccountResourceId: Storage account resource ID + - blobContainerName: Blob container name + - backupResourceGroupId: Resource group for backup resources + - backupVaultId: Backup vault resource ID (required for Custom) + - backupPolicyId: Backup policy resource ID (required for Custom) + - tags: Resource tags dict """ # Ensure configuration_params is a dict if configuration_params is None: @@ -115,17 +121,17 @@ def __validate_request(datasource_id, backup_strategy, configuration_params): try: json.loads(configuration_params) except json.JSONDecodeError: - raise InvalidArgumentValueError("Invalid JSON in configuration-settings") + raise InvalidArgumentValueError("Invalid JSON in backup-configuration-file") # Validate Custom strategy requirements 
if backup_strategy == 'Custom': - if not configuration_params.get("backup-vault-id"): + if not configuration_params.get("backupVaultId"): raise InvalidArgumentValueError( - "backup-vault-id is required in --configuration-settings when using Custom strategy" + "backupVaultId is required in --backup-configuration-file when using Custom strategy" ) - if not configuration_params.get("backup-policy-id"): + if not configuration_params.get("backupPolicyId"): raise InvalidArgumentValueError( - "backup-policy-id is required in --configuration-settings when using Custom strategy" + "backupPolicyId is required in --backup-configuration-file when using Custom strategy" ) # Parse cluster subscription for validation @@ -133,30 +139,30 @@ def __validate_request(datasource_id, backup_strategy, configuration_params): cluster_subscription_id = cluster_id_parts['subscription'] # Validate provided resource IDs are in the same subscription as cluster - backup_resource_group_id = configuration_params.get("backup-resource-group-id") + backup_resource_group_id = configuration_params.get("backupResourceGroupId") if backup_resource_group_id: rg_parts = parse_resource_id(backup_resource_group_id) if rg_parts['subscription'].lower() != cluster_subscription_id.lower(): raise InvalidArgumentValueError( - f"backup-resource-group-id must be in the same subscription as the cluster. " + f"backupResourceGroupId must be in the same subscription as the cluster. " f"Cluster subscription: {cluster_subscription_id}, Resource group subscription: {rg_parts['subscription']}" ) - storage_account_id = configuration_params.get("storage-account-id") + storage_account_id = configuration_params.get("storageAccountResourceId") if storage_account_id: sa_parts = parse_resource_id(storage_account_id) if sa_parts['subscription'].lower() != cluster_subscription_id.lower(): raise InvalidArgumentValueError( - f"storage-account-id must be in the same subscription as the cluster. 
" + f"storageAccountResourceId must be in the same subscription as the cluster. " f"Cluster subscription: {cluster_subscription_id}, Storage account subscription: {sa_parts['subscription']}" ) - backup_vault_id = configuration_params.get("backup-vault-id") + backup_vault_id = configuration_params.get("backupVaultId") if backup_vault_id: vault_parts = parse_resource_id(backup_vault_id) if vault_parts['subscription'].lower() != cluster_subscription_id.lower(): raise InvalidArgumentValueError( - f"backup-vault-id must be in the same subscription as the cluster. " + f"backupVaultId must be in the same subscription as the cluster. " f"Cluster subscription: {cluster_subscription_id}, Backup vault subscription: {vault_parts['subscription']}" ) @@ -170,7 +176,7 @@ def __check_existing_backup_instance(resource_client, datasource_id, cluster_nam Returns: None if no backup instance exists, raises error with details if one exists """ - print(f" Checking for existing backup configuration...") + print(f"\tChecking for existing backup configuration...") try: # Use extension routing to query backup instances on the cluster @@ -191,7 +197,7 @@ def __check_existing_backup_instance(resource_client, datasource_id, cluster_nam # If list is empty, no backup instance exists if not bi_list: - print(f" No existing backup instance found") + print(f"\tNo existing backup instance found") return None # Get details of the first backup instance @@ -217,16 +223,16 @@ def __check_existing_backup_instance(resource_client, datasource_id, cluster_nam vault_name = bi_parts.get('name', 'Unknown') vault_rg = bi_parts.get('resource_group', 'Unknown') - print(f" Found existing backup instance!") - print(f" - Backup Instance: {bi_name}") - print(f" - Backup Vault: {vault_name}") - print(f" - Resource Group: {vault_rg}") - print(f" - Protection State: {protection_status}") + print(f"\tFound existing backup instance!") + print(f"\t\t- Backup Instance: {bi_name}") + print(f"\t\t- Backup Vault: {vault_name}") 
+ print(f"\t\t- Resource Group: {vault_rg}") + print(f"\t\t- Protection State: {protection_status}") error_info = "" if protection_error: error_msg = protection_error.get('message', str(protection_error)) if isinstance(protection_error, dict) else str(protection_error) - print(f" - Error Details: {error_msg[:100]}..." if len(str(error_msg)) > 100 else f" - Error Details: {error_msg}") + print(f"\t\t- Error Details: {error_msg[:100]}..." if len(str(error_msg)) > 100 else f" - Error Details: {error_msg}") error_info = f"\n Protection Error: {error_msg}\n" raise InvalidArgumentValueError( @@ -252,13 +258,13 @@ def __check_existing_backup_instance(resource_client, datasource_id, cluster_nam # 404 or other errors mean no backup instance exists - that's fine error_str = str(e).lower() if "not found" in error_str or "404" in error_str or "does not exist" in error_str: - print(f" No existing backup instance found") + print(f"\tNo existing backup instance found") return None # For other errors, log and continue (don't block on extension routing failures) - print(f" Could not check for existing backup (will proceed): {str(e)[:100]}") + print(f"\tCould not check for existing backup (will proceed): {str(e)[:100]}") return None - print(f" No existing backup instance found") + print(f"\tNo existing backup instance found") return None @@ -266,9 +272,9 @@ def __validate_cluster(resource_client, datasource_id, cluster_name): """Validate the AKS cluster exists and get its details.""" cluster_resource = resource_client.resources.get_by_id(datasource_id, api_version="2024-08-01") cluster_location = cluster_resource.location - print(f" Cluster: {cluster_name}") - print(f" Location: {cluster_location}") - print(f" [OK] Cluster validated") + print(f"\tCluster: {cluster_name}") + print(f"\tLocation: {cluster_location}") + print(f"\t[OK] Cluster validated") return cluster_resource, cluster_location @@ -299,27 +305,27 @@ def __setup_resource_group(cmd, resource_client, 
backup_resource_group_id, clust """Create or use backup resource group.""" if backup_resource_group_id: backup_resource_group_name = parse_resource_id(backup_resource_group_id)['resource_group'] - print(f" Using provided resource group: {backup_resource_group_name}") + print(f"\tUsing provided resource group: {backup_resource_group_name}") try: backup_resource_group = resource_client.resource_groups.get(backup_resource_group_name) except Exception: raise InvalidArgumentValueError( f"Resource group '{backup_resource_group_name}' not found. " - f"Please ensure the resource group exists or remove 'backup-resource-group-id' from configuration to create one automatically." + f"Please ensure the resource group exists or remove 'backupResourceGroupId' from configuration to create one automatically." ) else: # Search for existing backup resource group with matching tag - print(f" Searching for existing AKS backup resource group in region {cluster_location}...") + print(f"\tSearching for existing AKS backup resource group in region {cluster_location}...") backup_resource_group = __find_existing_backup_resource_group(resource_client, cluster_location) if backup_resource_group: # Found existing resource group - reuse it backup_resource_group_name = backup_resource_group.name - print(f" Found existing backup resource group: {backup_resource_group_name}") + print(f"\tFound existing backup resource group: {backup_resource_group_name}") else: # Create new resource group with AKS backup tag backup_resource_group_name = __generate_backup_resource_group_name(cluster_location) - print(f" Creating resource group: {backup_resource_group_name}") + print(f"\tCreating resource group: {backup_resource_group_name}") # Build tags - include AKS backup tag plus any user-provided tags rg_tags = {AKS_BACKUP_TAG_KEY: cluster_location} @@ -329,14 +335,14 @@ def __setup_resource_group(cmd, resource_client, backup_resource_group_id, clust rg_params = {"location": cluster_location, "tags": rg_tags} 
backup_resource_group = resource_client.resource_groups.create_or_update(backup_resource_group_name, rg_params) - print(f" Resource Group: {backup_resource_group.id}") + print(f"\tResource Group: {backup_resource_group.id}") __check_and_assign_role( cmd, role="Contributor", assignee=cluster_identity_principal_id, scope=backup_resource_group.id, identity_name="cluster identity") - print(f" [OK] Resource group ready") + print(f"\t[OK] Resource group ready") return backup_resource_group, backup_resource_group_name @@ -378,23 +384,23 @@ def __setup_storage_account(cmd, cluster_subscription_id, storage_account_id, bl sa_parts = parse_resource_id(storage_account_id) backup_storage_account_name = sa_parts['name'] storage_account_rg = sa_parts['resource_group'] - print(f" Using provided storage account: {backup_storage_account_name}") + print(f"\tUsing provided storage account: {backup_storage_account_name}") backup_storage_account = storage_client.storage_accounts.get_properties(storage_account_rg, backup_storage_account_name) backup_storage_account_container_name = blob_container_name if blob_container_name else __generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) else: # Search for existing backup storage account with matching tag - print(f" Searching for existing AKS backup storage account in region {cluster_location}...") + print(f"\tSearching for existing AKS backup storage account in region {cluster_location}...") backup_storage_account, existing_rg = __find_existing_backup_storage_account(storage_client, cluster_location) if backup_storage_account: # Found existing storage account - reuse it backup_storage_account_name = backup_storage_account.name storage_account_rg = existing_rg - print(f" Found existing backup storage account: {backup_storage_account_name}") + print(f"\tFound existing backup storage account: {backup_storage_account_name}") else: # Create new storage account with AKS backup tag backup_storage_account_name = 
__generate_backup_storage_account_name(cluster_location) - print(f" Creating storage account: {backup_storage_account_name}") + print(f"\tCreating storage account: {backup_storage_account_name}") # Build tags - include AKS backup tag plus any user-provided tags sa_tags = {AKS_BACKUP_TAG_KEY: cluster_location} @@ -415,10 +421,10 @@ def __setup_storage_account(cmd, cluster_subscription_id, storage_account_id, bl backup_storage_account_container_name = __generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) - print(f" Storage Account: {backup_storage_account.id}") - print(f" Creating blob container: {backup_storage_account_container_name}") + print(f"\tStorage Account: {backup_storage_account.id}") + print(f"\tCreating blob container: {backup_storage_account_container_name}") storage_client.blob_containers.create(storage_account_rg, backup_storage_account_name, backup_storage_account_container_name, {}) - print(f" [OK] Storage account ready") + print(f"\t[OK] Storage account ready") return backup_storage_account, backup_storage_account_name, backup_storage_account_container_name @@ -441,7 +447,7 @@ def __install_backup_extension(cmd, cluster_subscription_id, cluster_resource_gr assignee=backup_extension.aks_assigned_identity.principal_id, scope=backup_storage_account.id, identity_name="backup extension identity") - print(f" [OK] Backup extension ready") + print(f"\t[OK] Backup extension ready") return backup_extension @@ -482,7 +488,7 @@ def __setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscrip vault_parts = parse_resource_id(backup_vault_id) backup_vault_name = vault_parts['name'] vault_rg = vault_parts['resource_group'] - print(f" Using provided backup vault: {backup_vault_name}") + print(f"\tUsing provided backup vault: {backup_vault_name}") from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Show as _BackupVaultShow backup_vault = _BackupVaultShow(cli_ctx=cmd.cli_ctx)(command_args={ 
"vault_name": backup_vault_name, @@ -490,17 +496,17 @@ def __setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscrip }) else: # Search for existing backup vault with matching tag - print(f" Searching for existing AKS backup vault in region {cluster_location}...") + print(f"\tSearching for existing AKS backup vault in region {cluster_location}...") backup_vault = __find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location) if backup_vault: # Found existing vault - reuse it backup_vault_name = backup_vault['name'] - print(f" Found existing backup vault: {backup_vault_name}") + print(f"\tFound existing backup vault: {backup_vault_name}") else: # Create new backup vault with AKS backup tag backup_vault_name = __generate_backup_vault_name(cluster_location) - print(f" Creating backup vault: {backup_vault_name}") + print(f"\tCreating backup vault: {backup_vault_name}") # Build tags - include AKS backup tag plus any user-provided tags vault_tags = {AKS_BACKUP_TAG_KEY: cluster_location} @@ -517,7 +523,7 @@ def __setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscrip } backup_vault = _BackupVaultCreate(cli_ctx=cmd.cli_ctx)(command_args=backup_vault_args).result() - print(f" Backup Vault: {backup_vault['id']}") + print(f"\tBackup Vault: {backup_vault['id']}") __check_and_assign_role( cmd, role="Reader", @@ -531,7 +537,7 @@ def __setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscrip assignee=backup_vault["identity"]["principalId"], scope=backup_resource_group.id, identity_name="backup vault identity (on resource group)") - print(f" [OK] Backup vault ready") + print(f"\t[OK] Backup vault ready") return backup_vault, backup_vault_name @@ -545,7 +551,7 @@ def __setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_ if backup_strategy == 'Custom' and backup_policy_id: # Use provided policy for Custom strategy backup_policy_name = parse_resource_id(backup_policy_id)['name'] - print(f" 
Using provided backup policy: {backup_policy_name}") + print(f"\tUsing provided backup policy: {backup_policy_name}") backup_policy = {"id": backup_policy_id} else: # Get vault RG - for custom with provided vault, use vault's RG @@ -569,12 +575,12 @@ def __setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_ pass if existing_policy: - print(f" Found existing backup policy: {backup_policy_name}") + print(f"\tFound existing backup policy: {backup_policy_name}") backup_policy = existing_policy else: # Create policy based on strategy policy_config = __get_policy_config_for_strategy(backup_strategy) - print(f" Creating backup policy: {backup_policy_name}") + print(f"\tCreating backup policy: {backup_policy_name}") backup_policy = _BackupPolicyCreate(cli_ctx=cmd.cli_ctx)(command_args={ "backup_policy_name": backup_policy_name, @@ -583,8 +589,8 @@ def __setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_ "policy": policy_config }) - print(f" Backup Policy: {backup_policy.get('id', backup_policy_id if backup_policy_id else 'N/A')}") - print(f" [OK] Backup policy ready") + print(f"\tBackup Policy: {backup_policy.get('id', backup_policy_id if backup_policy_id else 'N/A')}") + print(f"\t[OK] Backup policy ready") return backup_policy @@ -598,12 +604,12 @@ def __setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_ vault_id = backup_vault["id"] vault_name = backup_vault["name"] - print(f" Configuring trusted access between:") - print(f" - Backup Vault: {vault_name}") - print(f" - AKS Cluster: {cluster_name}") + print(f"\tConfiguring trusted access between:") + print(f"\t\t- Backup Vault: {vault_name}") + print(f"\t\t- AKS Cluster: {cluster_name}") # Check if trusted access binding already exists for this vault-cluster pair - print(f" Checking for existing trusted access binding...") + print(f"\tChecking for existing trusted access binding...") try: existing_bindings = 
cluster_client.trusted_access_role_bindings.list( resource_group_name=cluster_resource_group_name, @@ -611,8 +617,8 @@ def __setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_ ) for binding in existing_bindings: if binding.source_resource_id.lower() == vault_id.lower(): - print(f" Found existing binding: {binding.name}") - print(f" [OK] Trusted access already configured") + print(f"\tFound existing binding: {binding.name}") + print(f"\t[OK] Trusted access already configured") return except Exception: # If we can't list, we'll try to create @@ -620,8 +626,8 @@ def __setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_ # Create new trusted access role binding with GUID-based name binding_name = __generate_trusted_access_role_binding_name() - print(f" Creating trusted access role binding: {binding_name}") - print(f" Role: Microsoft.DataProtection/backupVaults/backup-operator") + print(f"\tCreating trusted access role binding: {binding_name}") + print(f"\t\tRole: Microsoft.DataProtection/backupVaults/backup-operator") _trusted_access_role_binding = TrustedAccessRoleBinding( source_resource_id=vault_id, @@ -632,7 +638,7 @@ def __setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_ resource_name=cluster_name, trusted_access_role_binding_name=binding_name, trusted_access_role_binding=_trusted_access_role_binding).result() - print(f" [OK] Trusted access configured - vault can now access cluster for backup operations") + print(f"\t[OK] Trusted access configured - vault can now access cluster for backup operations") def __create_backup_instance(cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group): @@ -650,7 +656,7 @@ def __create_backup_instance(cmd, cluster_name, cluster_resource_group_name, dat # Get policy ID policy_id_for_bi = 
backup_policy.get("id") if isinstance(backup_policy, dict) else backup_policy_id - print(f" Creating backup instance: {backup_instance_name}") + print(f"\tCreating backup instance: {backup_instance_name}") backup_instance_payload = __get_backup_instance_payload( backup_instance_name=backup_instance_name, cluster_name=cluster_name, @@ -669,18 +675,18 @@ def __create_backup_instance(cmd, cluster_name, cluster_resource_group_name, dat # Check and report the protection state protection_state = backup_instance.get('properties', {}).get('currentProtectionState', 'Unknown') - print(f" Protection State: {protection_state}") + print(f"\tProtection State: {protection_state}") if protection_state == "ProtectionConfigured": - print(f" [OK] Backup instance created and protection configured") + print(f"\t[OK] Backup instance created and protection configured") elif protection_state == "ConfiguringProtection": - print(f" [OK] Backup instance created - protection configuration in progress") + print(f"\t[OK] Backup instance created - protection configuration in progress") elif protection_state == "ProtectionError": error_details = backup_instance.get('properties', {}).get('protectionErrorDetails', {}) error_msg = error_details.get('message', 'Unknown error') if isinstance(error_details, dict) else str(error_details) - print(f" [WARNING] Backup instance created but protection has errors: {error_msg}") + print(f"\t[WARNING] Backup instance created but protection has errors: {error_msg}") else: - print(f" [OK] Backup instance created") + print(f"\t[OK] Backup instance created") return backup_instance, policy_id_for_bi @@ -710,13 +716,13 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy # Validate request (raises on failure) __validate_request(datasource_id, backup_strategy, configuration_params) - # Extract configuration values + # Extract configuration values (camelCase keys) resource_tags = configuration_params.get("tags") - storage_account_id = 
configuration_params.get("storage-account-id") - blob_container_name = configuration_params.get("blob-container-name") - backup_resource_group_id = configuration_params.get("backup-resource-group-id") - backup_vault_id = configuration_params.get("backup-vault-id") - backup_policy_id = configuration_params.get("backup-policy-id") + storage_account_id = configuration_params.get("storageAccountResourceId") + blob_container_name = configuration_params.get("blobContainerName") + backup_resource_group_id = configuration_params.get("backupResourceGroupId") + backup_vault_id = configuration_params.get("backupVaultId") + backup_policy_id = configuration_params.get("backupPolicyId") # Parse cluster details from resource ID cluster_id_parts = parse_resource_id(datasource_id) @@ -794,72 +800,124 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy def __get_policy_config_for_strategy(backup_strategy): - """Get backup policy configuration based on strategy.""" - # Default retention based on strategy - retention_duration = "P7D" # Week default - if backup_strategy == 'Month': - retention_duration = "P30D" + """Get backup policy configuration based on strategy. 
+ + Strategies: + - Week: 7 days operational tier, 7 days vault tier + - Month: 30 days operational tier, 30 days vault tier + - Immutable: 7 days operational tier, 30 days vault tier (with immutable retention) + - DisasterRecovery: 7 days operational tier, 90 days vault tier (for cross-region restore) + """ + # Operational tier retention based on strategy + op_tier_retention = "P7D" # Week default + vault_tier_retention = "P7D" # Week default + + if backup_strategy == 'Week': + op_tier_retention = "P7D" + vault_tier_retention = "P7D" + elif backup_strategy == 'Month': + op_tier_retention = "P30D" + vault_tier_retention = "P30D" elif backup_strategy == 'Immutable': - retention_duration = "P7D" # Op tier, vault tier handled separately + op_tier_retention = "P7D" + vault_tier_retention = "P30D" # Longer vault retention for immutable elif backup_strategy == 'DisasterRecovery': - retention_duration = "P7D" # Op tier, vault tier handled separately + op_tier_retention = "P7D" + vault_tier_retention = "P90D" # 90 days for DR scenarios + + policy_rules = [ + # Operational Store Default Retention Rule + { + "isDefault": True, + "lifecycles": [ + { + "deleteAfter": { + "duration": op_tier_retention, + "objectType": "AbsoluteDeleteOption" + }, + "sourceDataStore": { + "dataStoreType": "OperationalStore", + "objectType": "DataStoreInfoBase" + }, + "targetDataStoreCopySettings": [] + } + ], + "name": "Default", + "objectType": "AzureRetentionRule" + }, + # Vault Store Retention Rule + { + "isDefault": False, + "lifecycles": [ + { + "deleteAfter": { + "duration": vault_tier_retention, + "objectType": "AbsoluteDeleteOption" + }, + "sourceDataStore": { + "dataStoreType": "VaultStore", + "objectType": "DataStoreInfoBase" + }, + "targetDataStoreCopySettings": [] + } + ], + "name": "Vault", + "objectType": "AzureRetentionRule" + }, + # Backup Rule - Daily backup to Operational Store + { + "backupParameters": { + "backupType": "Incremental", + "objectType": "AzureBackupParams" + }, + 
"dataStore": { + "dataStoreType": "OperationalStore", + "objectType": "DataStoreInfoBase" + }, + "name": "BackupDaily", + "objectType": "AzureBackupRule", + "trigger": { + "objectType": "ScheduleBasedTriggerContext", + "schedule": { + "repeatingTimeIntervals": [ + "R/2024-01-01T00:00:00+00:00/P1D" + ], + "timeZone": "Coordinated Universal Time" + }, + "taggingCriteria": [ + { + "isDefault": True, + "tagInfo": { + "id": "Default_", + "tagName": "Default" + }, + "taggingPriority": 99 + }, + { + "isDefault": False, + "tagInfo": { + "id": "Vault_", + "tagName": "Vault" + }, + "taggingPriority": 50, + "criteria": [ + { + "objectType": "ScheduleBasedBackupCriteria", + "absoluteCriteria": ["FirstOfDay"] + } + ] + } + ] + } + } + ] return { "objectType": "BackupPolicy", "datasourceTypes": [ "Microsoft.ContainerService/managedClusters" ], - "policyRules": [ - { - "isDefault": True, - "lifecycles": [ - { - "deleteAfter": { - "duration": retention_duration, - "objectType": "AbsoluteDeleteOption" - }, - "sourceDataStore": { - "dataStoreType": "OperationalStore", - "objectType": "DataStoreInfoBase" - }, - "targetDataStoreCopySettings": [] - } - ], - "name": "Default", - "objectType": "AzureRetentionRule" - }, - { - "backupParameters": { - "backupType": "Incremental", - "objectType": "AzureBackupParams" - }, - "dataStore": { - "dataStoreType": "OperationalStore", - "objectType": "DataStoreInfoBase" - }, - "name": "BackupDaily", - "objectType": "AzureBackupRule", - "trigger": { - "objectType": "ScheduleBasedTriggerContext", - "schedule": { - "repeatingTimeIntervals": [ - "R/2024-01-01T00:00:00+00:00/P1D" - ], - "timeZone": "Coordinated Universal Time" - }, - "taggingCriteria": [ - { - "isDefault": True, - "tagInfo": { - "id": "Default_", - "tagName": "Default" - }, - "taggingPriority": 99 - } - ] - } - } - ] + "policyRules": policy_rules } @@ -1045,7 +1103,7 @@ def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster # Check extension provisioning state 
provisioning_state = extension.provisioning_state if provisioning_state == "Succeeded": - print(f" Data protection extension ({extension.name}) is already installed and healthy.") + print(f"\tData protection extension ({extension.name}) is already installed and healthy.") return extension elif provisioning_state == "Failed": raise InvalidArgumentValueError( @@ -1063,7 +1121,7 @@ def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster f"Please wait for the operation to complete and try again." ) - print(f" Installing data protection extension (azure-aks-backup)...") + print(f"\tInstalling data protection extension (azure-aks-backup)...") from azure.cli.core.extension.operations import add_extension_to_path from importlib import import_module @@ -1093,8 +1151,8 @@ def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster # Verify extension is in healthy state after installation if extension.provisioning_state == "Succeeded": - print(f" Extension installed and healthy (Provisioning State: Succeeded)") + print(f"\tExtension installed and healthy (Provisioning State: Succeeded)") else: - print(f" Warning: Extension provisioning state is '{extension.provisioning_state}'") + print(f"\tWarning: Extension provisioning state is '{extension.provisioning_state}'") return extension \ No newline at end of file diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index 4f50c91a9b8..673eeec8e5a 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -1151,7 +1151,7 @@ def restore_initialize_for_item_recovery(cmd, datasource_type, source_datastore, return restore_request -def dataprotection_enable_backup(cmd, datasource_type, datasource_id, backup_strategy=None, configuration_settings=None): +def dataprotection_enable_backup(cmd, datasource_type, datasource_id, 
backup_strategy=None, backup_configuration_file=None): """Enable backup for a datasource using a single command. This command orchestrates all the steps required to enable backup: @@ -1187,8 +1187,8 @@ def dataprotection_enable_backup(cmd, datasource_type, datasource_id, backup_str f"Allowed values: {', '.join(valid_strategies)}" ) - # Parse configuration settings if it's a string (from file) - config = _parse_configuration_settings(configuration_settings) + # Parse configuration from file or dict + config = _parse_backup_configuration(backup_configuration_file) # Route to datasource-specific handler if datasource_type == "AzureKubernetesService": @@ -1202,24 +1202,34 @@ def dataprotection_enable_backup(cmd, datasource_type, datasource_id, backup_str return -def _parse_configuration_settings(configuration_settings): - """Parse configuration settings from file or dict into a dictionary.""" +def _parse_backup_configuration(backup_configuration_file): + """Parse backup configuration from file or dict into a dictionary. 
+ + Args: + backup_configuration_file: Can be: + - None: Returns empty dict + - dict: Returns as-is (already parsed by validate_file_or_dict) + - str: JSON string to parse + + Returns: + dict: Parsed configuration + """ import json - if configuration_settings is None: + if backup_configuration_file is None: return {} - # If it's already a dict, return as-is - if isinstance(configuration_settings, dict): - return configuration_settings + # If it's already a dict, return as-is (validate_file_or_dict already parsed the file) + if isinstance(backup_configuration_file, dict): + return backup_configuration_file # If it's a string, try to parse as JSON - if isinstance(configuration_settings, str): + if isinstance(backup_configuration_file, str): try: - return json.loads(configuration_settings) + return json.loads(backup_configuration_file) except json.JSONDecodeError: raise InvalidArgumentValueError( - f"Invalid JSON in configuration-settings: '{configuration_settings}'" + f"Invalid JSON in backup-configuration-file: '{backup_configuration_file}'" ) return {} From db370ebde4a54a3269adc36a6ef7b77ff0cab23b Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 16 Feb 2026 12:06:26 +0530 Subject: [PATCH 08/24] cleanup test file Signed-off-by: Anshul Ahuja --- .../tests/latest/test_dataprotection_backup_policy.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_backup_policy.py b/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_backup_policy.py index 86d47ba8e03..c6564bcbb63 100644 --- a/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_backup_policy.py +++ b/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_backup_policy.py @@ -137,7 +137,4 @@ def test_dataprotection_backup_policy_generic_criteria(test): test.cmd('az dataprotection backup-policy tag create-generic-criteria --months-of-year ' 'JANUARY February 
MarCh april May June July August September October November December', checks=[ test.check('length(months_of_year)', 12) - ]) - - def test_dataprotection_enable_backup(test): - test.cmd('az dataprotection enable-backup enable-backup') \ No newline at end of file + ]) \ No newline at end of file From 89e410c8ba7de7abf915abf2668e8d397d5c0ac0 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 16 Feb 2026 12:11:49 +0530 Subject: [PATCH 09/24] cleanup stray change Signed-off-by: Anshul Ahuja --- src/acat/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/acat/README.md b/src/acat/README.md index dd62d907731..55cd2f2e7f6 100644 --- a/src/acat/README.md +++ b/src/acat/README.md @@ -1,4 +1,3 @@ -Test # Azure CLI Acat Extension # This is an extension to Azure CLI to manage Acat resources. @@ -70,4 +69,4 @@ az acat report webhook create ` # check if the webhook is configured correctly az acat report webhook list --report-name $reportName | ConvertFrom-Json az acat report webhook show --report-name $reportName --webhook-name $hookName -``` +``` \ No newline at end of file From 076bfa2d74bb20185547da314c8afc5e59b0a133 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 16 Feb 2026 12:16:53 +0530 Subject: [PATCH 10/24] Update Vendored AKS SDK Signed-off-by: Anshul Ahuja --- .../azure_mgmt_containerservice/__init__.py | 26 +- .../_configuration.py | 59 +- .../_container_service_client.py | 2728 +--------- .../{v2024_07_01 => }/_patch.py | 9 +- .../_version.py => _utils/__init__.py} | 3 - .../serialization.py} | 602 +-- .../azure_mgmt_containerservice/_version.py | 2 +- .../aio/__init__.py | 23 +- .../aio/_configuration.py | 57 +- .../aio/_container_service_client.py | 2729 +--------- .../{v2024_07_01/models => aio}/_patch.py | 9 +- .../aio/operations/__init__.py | 34 +- .../aio/operations/_agent_pools_operations.py | 218 +- .../aio/operations/_machines_operations.py | 55 +- .../_maintenance_configurations_operations.py | 104 +- 
.../_managed_clusters_operations.py | 467 +- .../_managed_namespaces_operations.py | 809 +++ .../aio/operations/_operations.py | 42 +- .../aio => aio/operations}/_patch.py | 9 +- ...private_endpoint_connections_operations.py | 83 +- .../_private_link_resources_operations.py | 38 +- ...olve_private_link_service_id_operations.py | 47 +- .../aio/operations/_snapshots_operations.py | 126 +- ...trusted_access_role_bindings_operations.py | 89 +- .../_trusted_access_roles_operations.py | 42 +- .../azure_mgmt_containerservice/models.py | 11 - .../models/__init__.py | 506 ++ .../models/_container_service_client_enums.py | 358 +- .../{v2024_07_01 => }/models/_models_py3.py | 4497 +++++++++++------ .../aio/operations => models}/_patch.py | 9 +- .../{v2024_07_01 => }/operations/__init__.py | 34 +- .../operations/_agent_pools_operations.py | 284 +- .../operations/_machines_operations.py | 66 +- .../_maintenance_configurations_operations.py | 120 +- .../_managed_clusters_operations.py | 599 ++- .../_managed_namespaces_operations.py | 1086 ++++ .../operations/_operations.py | 48 +- .../operations/_patch.py | 21 + ...private_endpoint_connections_operations.py | 103 +- .../_private_link_resources_operations.py | 46 +- ...olve_private_link_service_id_operations.py | 55 +- .../operations/_snapshots_operations.py | 146 +- ...trusted_access_role_bindings_operations.py | 109 +- .../_trusted_access_roles_operations.py | 50 +- .../v2024_07_01/__init__.py | 26 - .../v2024_07_01/_configuration.py | 65 - .../v2024_07_01/_container_service_client.py | 179 - .../v2024_07_01/aio/__init__.py | 23 - .../v2024_07_01/aio/_configuration.py | 65 - .../aio/_container_service_client.py | 182 - .../v2024_07_01/models/__init__.py | 407 -- .../v2024_07_01/operations/_patch.py | 20 - .../v2024_07_01/py.typed | 1 - 53 files changed, 8243 insertions(+), 9283 deletions(-) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/_patch.py (61%) rename 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01/_version.py => _utils/__init__.py} (92%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{_serialization.py => _utils/serialization.py} (81%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01/models => aio}/_patch.py (61%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/__init__.py (61%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_agent_pools_operations.py (85%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_machines_operations.py (81%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_maintenance_configurations_operations.py (81%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_managed_clusters_operations.py (87%) create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_managed_namespaces_operations.py rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_operations.py (76%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01/aio => aio/operations}/_patch.py (61%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_private_endpoint_connections_operations.py (86%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_private_link_resources_operations.py (75%) rename 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_resolve_private_link_service_id_operations.py (81%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_snapshots_operations.py (83%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_trusted_access_role_bindings_operations.py (89%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/aio/operations/_trusted_access_roles_operations.py (77%) delete mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models.py create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/__init__.py rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/models/_container_service_client_enums.py (57%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/models/_models_py3.py (61%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01/aio/operations => models}/_patch.py (61%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/__init__.py (61%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_agent_pools_operations.py (87%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_machines_operations.py (85%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_maintenance_configurations_operations.py (85%) rename 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_managed_clusters_operations.py (89%) create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_managed_namespaces_operations.py rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_operations.py (76%) create mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_patch.py rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_private_endpoint_connections_operations.py (88%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_private_link_resources_operations.py (79%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_resolve_private_link_service_id_operations.py (83%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_snapshots_operations.py (87%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_trusted_access_role_bindings_operations.py (91%) rename src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/{v2024_07_01 => }/operations/_trusted_access_roles_operations.py (78%) delete mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/__init__.py delete mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_configuration.py delete mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_container_service_client.py delete mode 100644 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/__init__.py delete mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_configuration.py delete mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_container_service_client.py delete mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/__init__.py delete mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_patch.py delete mode 100644 src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/py.typed diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/__init__.py index c9e1e010e0e..999067d049c 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/__init__.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/__init__.py @@ -5,16 +5,28 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position -from ._container_service_client import ContainerServiceClient -__all__ = ['ContainerServiceClient'] +from typing import TYPE_CHECKING -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import +from ._container_service_client import ContainerServiceClient # type: ignore from ._version import VERSION __version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ContainerServiceClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_configuration.py index d0d261e8db8..2a018b2cb81 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_configuration.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_configuration.py @@ -1,14 +1,12 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from typing import Any, TYPE_CHECKING + +from typing import Any, Optional, TYPE_CHECKING from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy @@ -16,10 +14,11 @@ from ._version import VERSION if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports + from azure.core import AzureClouds from azure.core.credentials import TokenCredential -class ContainerServiceClientConfiguration: + +class ContainerServiceClientConfiguration: # pylint: disable=too-many-instance-attributes """Configuration for ContainerServiceClient. Note that all parameters used to create this instance are saved as instance @@ -29,14 +28,23 @@ class ContainerServiceClientConfiguration: :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. :type subscription_id: str + :param cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is + None. + :type cloud_setting: ~azure.core.AzureClouds + :keyword api_version: Api Version. Default value is "2025-10-01". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str """ def __init__( self, credential: "TokenCredential", subscription_id: str, + cloud_setting: Optional["AzureClouds"] = None, **kwargs: Any - ): + ) -> None: + api_version: str = kwargs.pop("api_version", "2025-10-01") + if credential is None: raise ValueError("Parameter 'credential' must not be None.") if subscription_id is None: @@ -44,23 +52,24 @@ def __init__( self.credential = credential self.subscription_id = subscription_id - self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) - kwargs.setdefault('sdk_moniker', 'azure-mgmt-containerservice/{}'.format(VERSION)) + self.cloud_setting = cloud_setting + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) - def _configure( - self, - **kwargs: Any - ): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + 
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") if self.credential and not self.authentication_policy: - self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs) + self.authentication_policy = ARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_container_service_client.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_container_service_client.py index d3dcf9c29cd..b5b52bb4815 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_container_service_client.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_container_service_client.py @@ -1,89 +1,116 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Optional, TYPE_CHECKING +from copy import deepcopy +from typing import Any, Optional, TYPE_CHECKING, cast from typing_extensions import Self from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.settings import settings from azure.mgmt.core import ARMPipelineClient from azure.mgmt.core.policies import ARMAutoResourceProviderRegistrationPolicy -from azure.profiles import KnownProfiles, ProfileDefinition -from azure.profiles.multiapiclient import MultiApiClientMixin +from azure.mgmt.core.tools import get_arm_endpoints +from . import models as _models from ._configuration import ContainerServiceClientConfiguration -from ._serialization import Deserializer, Serializer +from ._utils.serialization import Deserializer, Serializer +from .operations import ( + AgentPoolsOperations, + MachinesOperations, + MaintenanceConfigurationsOperations, + ManagedClustersOperations, + ManagedNamespacesOperations, + Operations, + PrivateEndpointConnectionsOperations, + PrivateLinkResourcesOperations, + ResolvePrivateLinkServiceIdOperations, + SnapshotsOperations, + TrustedAccessRoleBindingsOperations, + TrustedAccessRolesOperations, +) if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports + from azure.core import AzureClouds from azure.core.credentials import TokenCredential -class _SDKClient(object): - def __init__(self, *args, **kwargs): - """This is a fake class to support current implemetation of MultiApiClientMixin." 
- Will be removed in final version of multiapi azure-core based client - """ - pass -class ContainerServiceClient(MultiApiClientMixin, _SDKClient): +class ContainerServiceClient: # pylint: disable=too-many-instance-attributes """The Container Service Client. - This ready contains multiple API versions, to help you deal with all of the Azure clouds - (Azure Stack, Azure Government, Azure China, etc.). - By default, it uses the latest API version available on public Azure. - For production, you should stick to a particular api-version and/or profile. - The profile sets a mapping between an operation group and its API version. - The api-version parameter sets the default API version if the operation - group is not described in the profile. - + :ivar operations: Operations operations + :vartype operations: azure.mgmt.containerservice.operations.Operations + :ivar managed_clusters: ManagedClustersOperations operations + :vartype managed_clusters: azure.mgmt.containerservice.operations.ManagedClustersOperations + :ivar maintenance_configurations: MaintenanceConfigurationsOperations operations + :vartype maintenance_configurations: + azure.mgmt.containerservice.operations.MaintenanceConfigurationsOperations + :ivar managed_namespaces: ManagedNamespacesOperations operations + :vartype managed_namespaces: azure.mgmt.containerservice.operations.ManagedNamespacesOperations + :ivar agent_pools: AgentPoolsOperations operations + :vartype agent_pools: azure.mgmt.containerservice.operations.AgentPoolsOperations + :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations + :vartype private_endpoint_connections: + azure.mgmt.containerservice.operations.PrivateEndpointConnectionsOperations + :ivar private_link_resources: PrivateLinkResourcesOperations operations + :vartype private_link_resources: + azure.mgmt.containerservice.operations.PrivateLinkResourcesOperations + :ivar resolve_private_link_service_id: ResolvePrivateLinkServiceIdOperations operations + 
:vartype resolve_private_link_service_id: + azure.mgmt.containerservice.operations.ResolvePrivateLinkServiceIdOperations + :ivar snapshots: SnapshotsOperations operations + :vartype snapshots: azure.mgmt.containerservice.operations.SnapshotsOperations + :ivar trusted_access_role_bindings: TrustedAccessRoleBindingsOperations operations + :vartype trusted_access_role_bindings: + azure.mgmt.containerservice.operations.TrustedAccessRoleBindingsOperations + :ivar trusted_access_roles: TrustedAccessRolesOperations operations + :vartype trusted_access_roles: + azure.mgmt.containerservice.operations.TrustedAccessRolesOperations + :ivar machines: MachinesOperations operations + :vartype machines: azure.mgmt.containerservice.operations.MachinesOperations :param credential: Credential needed for the client to connect to Azure. Required. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. :type subscription_id: str - :param api_version: API version to use if no profile is provided, or if missing in profile. - :type api_version: str - :param base_url: Service URL + :param base_url: Service URL. Default value is None. :type base_url: str - :param profile: A profile definition, from KnownProfiles to dict. - :type profile: azure.profiles.KnownProfiles - :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :keyword cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is + None. + :paramtype cloud_setting: ~azure.core.AzureClouds + :keyword api_version: Api Version. Default value is "2025-10-01". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
""" - DEFAULT_API_VERSION = '2024-07-01' - _PROFILE_TAG = "azure.mgmt.containerservice.ContainerServiceClient" - LATEST_PROFILE = ProfileDefinition({ - _PROFILE_TAG: { - None: DEFAULT_API_VERSION, - 'container_services': '2019-04-01', - 'fleet_members': '2022-09-02-preview', - 'fleets': '2022-09-02-preview', - 'load_balancers': '2024-06-02-preview', - 'managed_cluster_snapshots': '2024-06-02-preview', - 'open_shift_managed_clusters': '2019-04-30', - 'operation_status_result': '2024-06-02-preview', - }}, - _PROFILE_TAG + " latest" - ) - def __init__( self, credential: "TokenCredential", subscription_id: str, - api_version: Optional[str]=None, - base_url: str = "https://management.azure.com", - profile: KnownProfiles=KnownProfiles.default, + base_url: Optional[str] = None, + *, + cloud_setting: Optional["AzureClouds"] = None, **kwargs: Any - ): - if api_version: - kwargs.setdefault('api_version', api_version) - self._config = ContainerServiceClientConfiguration(credential, subscription_id, **kwargs) + ) -> None: + _cloud = cloud_setting or settings.current.azure_cloud # type: ignore + _endpoints = get_arm_endpoints(_cloud) + if not base_url: + base_url = _endpoints["resource_manager"] + credential_scopes = kwargs.pop("credential_scopes", _endpoints["credential_scopes"]) + self._config = ContainerServiceClientConfiguration( + credential=credential, + subscription_id=subscription_id, + cloud_setting=cloud_setting, + credential_scopes=credential_scopes, + **kwargs + ) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -102,2554 +129,69 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, self._config.http_logging_policy, ] - self._client = ARMPipelineClient(base_url=base_url, policies=_policies, **kwargs) - super(ContainerServiceClient, self).__init__( - api_version=api_version, - profile=profile + self._client: ARMPipelineClient = ARMPipelineClient(base_url=cast(str, base_url), 
policies=_policies, **kwargs) + + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.operations = Operations(self._client, self._config, self._serialize, self._deserialize) + self.managed_clusters = ManagedClustersOperations( + self._client, self._config, self._serialize, self._deserialize ) + self.maintenance_configurations = MaintenanceConfigurationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.managed_namespaces = ManagedNamespacesOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.agent_pools = AgentPoolsOperations(self._client, self._config, self._serialize, self._deserialize) + self.private_endpoint_connections = PrivateEndpointConnectionsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.private_link_resources = PrivateLinkResourcesOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.resolve_private_link_service_id = ResolvePrivateLinkServiceIdOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.snapshots = SnapshotsOperations(self._client, self._config, self._serialize, self._deserialize) + self.trusted_access_role_bindings = TrustedAccessRoleBindingsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.trusted_access_roles = TrustedAccessRolesOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.machines = MachinesOperations(self._client, self._config, self._serialize, self._deserialize) - @classmethod - def _models_dict(cls, api_version): - return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} - - @classmethod - def models(cls, api_version=DEFAULT_API_VERSION): - """Module depends on the API 
version: - - * 2017-07-01: :mod:`v2017_07_01.models` - * 2018-03-31: :mod:`v2018_03_31.models` - * 2018-08-01-preview: :mod:`v2018_08_01_preview.models` - * 2018-09-30-preview: :mod:`v2018_09_30_preview.models` - * 2019-02-01: :mod:`v2019_02_01.models` - * 2019-04-01: :mod:`v2019_04_01.models` - * 2019-04-30: :mod:`v2019_04_30.models` - * 2019-06-01: :mod:`v2019_06_01.models` - * 2019-08-01: :mod:`v2019_08_01.models` - * 2019-09-30-preview: :mod:`v2019_09_30_preview.models` - * 2019-10-01: :mod:`v2019_10_01.models` - * 2019-10-27-preview: :mod:`v2019_10_27_preview.models` - * 2019-11-01: :mod:`v2019_11_01.models` - * 2020-01-01: :mod:`v2020_01_01.models` - * 2020-02-01: :mod:`v2020_02_01.models` - * 2020-03-01: :mod:`v2020_03_01.models` - * 2020-04-01: :mod:`v2020_04_01.models` - * 2020-06-01: :mod:`v2020_06_01.models` - * 2020-07-01: :mod:`v2020_07_01.models` - * 2020-09-01: :mod:`v2020_09_01.models` - * 2020-11-01: :mod:`v2020_11_01.models` - * 2020-12-01: :mod:`v2020_12_01.models` - * 2021-02-01: :mod:`v2021_02_01.models` - * 2021-03-01: :mod:`v2021_03_01.models` - * 2021-05-01: :mod:`v2021_05_01.models` - * 2021-07-01: :mod:`v2021_07_01.models` - * 2021-08-01: :mod:`v2021_08_01.models` - * 2021-09-01: :mod:`v2021_09_01.models` - * 2021-10-01: :mod:`v2021_10_01.models` - * 2021-11-01-preview: :mod:`v2021_11_01_preview.models` - * 2022-01-01: :mod:`v2022_01_01.models` - * 2022-01-02-preview: :mod:`v2022_01_02_preview.models` - * 2022-02-01: :mod:`v2022_02_01.models` - * 2022-02-02-preview: :mod:`v2022_02_02_preview.models` - * 2022-03-01: :mod:`v2022_03_01.models` - * 2022-03-02-preview: :mod:`v2022_03_02_preview.models` - * 2022-04-01: :mod:`v2022_04_01.models` - * 2022-04-02-preview: :mod:`v2022_04_02_preview.models` - * 2022-05-02-preview: :mod:`v2022_05_02_preview.models` - * 2022-06-01: :mod:`v2022_06_01.models` - * 2022-06-02-preview: :mod:`v2022_06_02_preview.models` - * 2022-07-01: :mod:`v2022_07_01.models` - * 2022-07-02-preview: 
:mod:`v2022_07_02_preview.models` - * 2022-08-02-preview: :mod:`v2022_08_02_preview.models` - * 2022-08-03-preview: :mod:`v2022_08_03_preview.models` - * 2022-09-01: :mod:`v2022_09_01.models` - * 2022-09-02-preview: :mod:`v2022_09_02_preview.models` - * 2022-10-02-preview: :mod:`v2022_10_02_preview.models` - * 2022-11-01: :mod:`v2022_11_01.models` - * 2022-11-02-preview: :mod:`v2022_11_02_preview.models` - * 2023-01-01: :mod:`v2023_01_01.models` - * 2023-01-02-preview: :mod:`v2023_01_02_preview.models` - * 2023-02-01: :mod:`v2023_02_01.models` - * 2023-02-02-preview: :mod:`v2023_02_02_preview.models` - * 2023-03-01: :mod:`v2023_03_01.models` - * 2023-03-02-preview: :mod:`v2023_03_02_preview.models` - * 2023-04-01: :mod:`v2023_04_01.models` - * 2023-04-02-preview: :mod:`v2023_04_02_preview.models` - * 2023-05-01: :mod:`v2023_05_01.models` - * 2023-05-02-preview: :mod:`v2023_05_02_preview.models` - * 2023-06-01: :mod:`v2023_06_01.models` - * 2023-06-02-preview: :mod:`v2023_06_02_preview.models` - * 2023-07-01: :mod:`v2023_07_01.models` - * 2023-07-02-preview: :mod:`v2023_07_02_preview.models` - * 2023-08-01: :mod:`v2023_08_01.models` - * 2023-08-02-preview: :mod:`v2023_08_02_preview.models` - * 2023-09-01: :mod:`v2023_09_01.models` - * 2023-09-02-preview: :mod:`v2023_09_02_preview.models` - * 2023-10-01: :mod:`v2023_10_01.models` - * 2023-10-02-preview: :mod:`v2023_10_02_preview.models` - * 2023-11-01: :mod:`v2023_11_01.models` - * 2023-11-02-preview: :mod:`v2023_11_02_preview.models` - * 2024-01-01: :mod:`v2024_01_01.models` - * 2024-01-02-preview: :mod:`v2024_01_02_preview.models` - * 2024-02-01: :mod:`v2024_02_01.models` - * 2024-02-02-preview: :mod:`v2024_02_02_preview.models` - * 2024-03-02-preview: :mod:`v2024_03_02_preview.models` - * 2024-04-02-preview: :mod:`v2024_04_02_preview.models` - * 2024-05-01: :mod:`v2024_05_01.models` - * 2024-05-02-preview: :mod:`v2024_05_02_preview.models` - * 2024-06-02-preview: :mod:`v2024_06_02_preview.models` - * 2024-07-01: 
:mod:`v2024_07_01.models` - """ - if api_version == '2017-07-01': - from .v2017_07_01 import models - return models - elif api_version == '2018-03-31': - from .v2018_03_31 import models - return models - elif api_version == '2018-08-01-preview': - from .v2018_08_01_preview import models - return models - elif api_version == '2018-09-30-preview': - from .v2018_09_30_preview import models - return models - elif api_version == '2019-02-01': - from .v2019_02_01 import models - return models - elif api_version == '2019-04-01': - from .v2019_04_01 import models - return models - elif api_version == '2019-04-30': - from .v2019_04_30 import models - return models - elif api_version == '2019-06-01': - from .v2019_06_01 import models - return models - elif api_version == '2019-08-01': - from .v2019_08_01 import models - return models - elif api_version == '2019-09-30-preview': - from .v2019_09_30_preview import models - return models - elif api_version == '2019-10-01': - from .v2019_10_01 import models - return models - elif api_version == '2019-10-27-preview': - from .v2019_10_27_preview import models - return models - elif api_version == '2019-11-01': - from .v2019_11_01 import models - return models - elif api_version == '2020-01-01': - from .v2020_01_01 import models - return models - elif api_version == '2020-02-01': - from .v2020_02_01 import models - return models - elif api_version == '2020-03-01': - from .v2020_03_01 import models - return models - elif api_version == '2020-04-01': - from .v2020_04_01 import models - return models - elif api_version == '2020-06-01': - from .v2020_06_01 import models - return models - elif api_version == '2020-07-01': - from .v2020_07_01 import models - return models - elif api_version == '2020-09-01': - from .v2020_09_01 import models - return models - elif api_version == '2020-11-01': - from .v2020_11_01 import models - return models - elif api_version == '2020-12-01': - from .v2020_12_01 import models - return models - elif 
api_version == '2021-02-01': - from .v2021_02_01 import models - return models - elif api_version == '2021-03-01': - from .v2021_03_01 import models - return models - elif api_version == '2021-05-01': - from .v2021_05_01 import models - return models - elif api_version == '2021-07-01': - from .v2021_07_01 import models - return models - elif api_version == '2021-08-01': - from .v2021_08_01 import models - return models - elif api_version == '2021-09-01': - from .v2021_09_01 import models - return models - elif api_version == '2021-10-01': - from .v2021_10_01 import models - return models - elif api_version == '2021-11-01-preview': - from .v2021_11_01_preview import models - return models - elif api_version == '2022-01-01': - from .v2022_01_01 import models - return models - elif api_version == '2022-01-02-preview': - from .v2022_01_02_preview import models - return models - elif api_version == '2022-02-01': - from .v2022_02_01 import models - return models - elif api_version == '2022-02-02-preview': - from .v2022_02_02_preview import models - return models - elif api_version == '2022-03-01': - from .v2022_03_01 import models - return models - elif api_version == '2022-03-02-preview': - from .v2022_03_02_preview import models - return models - elif api_version == '2022-04-01': - from .v2022_04_01 import models - return models - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview import models - return models - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview import models - return models - elif api_version == '2022-06-01': - from .v2022_06_01 import models - return models - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview import models - return models - elif api_version == '2022-07-01': - from .v2022_07_01 import models - return models - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview import models - return models - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview 
import models - return models - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview import models - return models - elif api_version == '2022-09-01': - from .v2022_09_01 import models - return models - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview import models - return models - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview import models - return models - elif api_version == '2022-11-01': - from .v2022_11_01 import models - return models - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview import models - return models - elif api_version == '2023-01-01': - from .v2023_01_01 import models - return models - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview import models - return models - elif api_version == '2023-02-01': - from .v2023_02_01 import models - return models - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview import models - return models - elif api_version == '2023-03-01': - from .v2023_03_01 import models - return models - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview import models - return models - elif api_version == '2023-04-01': - from .v2023_04_01 import models - return models - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview import models - return models - elif api_version == '2023-05-01': - from .v2023_05_01 import models - return models - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview import models - return models - elif api_version == '2023-06-01': - from .v2023_06_01 import models - return models - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview import models - return models - elif api_version == '2023-07-01': - from .v2023_07_01 import models - return models - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview import models - return models - elif api_version == '2023-08-01': - from .v2023_08_01 import models - 
return models - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview import models - return models - elif api_version == '2023-09-01': - from .v2023_09_01 import models - return models - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview import models - return models - elif api_version == '2023-10-01': - from .v2023_10_01 import models - return models - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview import models - return models - elif api_version == '2023-11-01': - from .v2023_11_01 import models - return models - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview import models - return models - elif api_version == '2024-01-01': - from .v2024_01_01 import models - return models - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview import models - return models - elif api_version == '2024-02-01': - from .v2024_02_01 import models - return models - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview import models - return models - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview import models - return models - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview import models - return models - elif api_version == '2024-05-01': - from .v2024_05_01 import models - return models - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview import models - return models - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview import models - return models - elif api_version == '2024-07-01': - from .v2024_07_01 import models - return models - raise ValueError("API version {} is not available".format(api_version)) - - @property - def agent_pools(self): - """Instance depends on the API version: - - * 2019-02-01: :class:`AgentPoolsOperations` - * 2019-04-01: :class:`AgentPoolsOperations` - * 2019-06-01: :class:`AgentPoolsOperations` - * 2019-08-01: :class:`AgentPoolsOperations` - * 2019-10-01: 
:class:`AgentPoolsOperations` - * 2019-11-01: :class:`AgentPoolsOperations` - * 2020-01-01: :class:`AgentPoolsOperations` - * 2020-02-01: :class:`AgentPoolsOperations` - * 2020-03-01: :class:`AgentPoolsOperations` - * 2020-04-01: :class:`AgentPoolsOperations` - * 2020-06-01: :class:`AgentPoolsOperations` - * 2020-07-01: :class:`AgentPoolsOperations` - * 2020-09-01: :class:`AgentPoolsOperations` - * 2020-11-01: :class:`AgentPoolsOperations` - * 2020-12-01: :class:`AgentPoolsOperations` - * 2021-02-01: :class:`AgentPoolsOperations` - * 2021-03-01: :class:`AgentPoolsOperations` - * 2021-05-01: :class:`AgentPoolsOperations` - * 2021-07-01: :class:`AgentPoolsOperations` - * 2021-08-01: :class:`AgentPoolsOperations` - * 2021-09-01: :class:`AgentPoolsOperations` - * 2021-10-01: :class:`AgentPoolsOperations` - * 2021-11-01-preview: :class:`AgentPoolsOperations` - * 2022-01-01: :class:`AgentPoolsOperations` - * 2022-01-02-preview: :class:`AgentPoolsOperations` - * 2022-02-01: :class:`AgentPoolsOperations` - * 2022-02-02-preview: :class:`AgentPoolsOperations` - * 2022-03-01: :class:`AgentPoolsOperations` - * 2022-03-02-preview: :class:`AgentPoolsOperations` - * 2022-04-01: :class:`AgentPoolsOperations` - * 2022-04-02-preview: :class:`AgentPoolsOperations` - * 2022-05-02-preview: :class:`AgentPoolsOperations` - * 2022-06-01: :class:`AgentPoolsOperations` - * 2022-06-02-preview: :class:`AgentPoolsOperations` - * 2022-07-01: :class:`AgentPoolsOperations` - * 2022-07-02-preview: :class:`AgentPoolsOperations` - * 2022-08-02-preview: :class:`AgentPoolsOperations` - * 2022-08-03-preview: :class:`AgentPoolsOperations` - * 2022-09-01: :class:`AgentPoolsOperations` - * 2022-09-02-preview: :class:`AgentPoolsOperations` - * 2022-10-02-preview: :class:`AgentPoolsOperations` - * 2022-11-01: :class:`AgentPoolsOperations` - * 2022-11-02-preview: :class:`AgentPoolsOperations` - * 2023-01-01: :class:`AgentPoolsOperations` - * 2023-01-02-preview: :class:`AgentPoolsOperations` - * 2023-02-01: 
:class:`AgentPoolsOperations` - * 2023-02-02-preview: :class:`AgentPoolsOperations` - * 2023-03-01: :class:`AgentPoolsOperations` - * 2023-03-02-preview: :class:`AgentPoolsOperations` - * 2023-04-01: :class:`AgentPoolsOperations` - * 2023-04-02-preview: :class:`AgentPoolsOperations` - * 2023-05-01: :class:`AgentPoolsOperations` - * 2023-05-02-preview: :class:`AgentPoolsOperations` - * 2023-06-01: :class:`AgentPoolsOperations` - * 2023-06-02-preview: :class:`AgentPoolsOperations` - * 2023-07-01: :class:`AgentPoolsOperations` - * 2023-07-02-preview: :class:`AgentPoolsOperations` - * 2023-08-01: :class:`AgentPoolsOperations` - * 2023-08-02-preview: :class:`AgentPoolsOperations` - * 2023-09-01: :class:`AgentPoolsOperations` - * 2023-09-02-preview: :class:`AgentPoolsOperations` - * 2023-10-01: :class:`AgentPoolsOperations` - * 2023-10-02-preview: :class:`AgentPoolsOperations` - * 2023-11-01: :class:`AgentPoolsOperations` - * 2023-11-02-preview: :class:`AgentPoolsOperations` - * 2024-01-01: :class:`AgentPoolsOperations` - * 2024-01-02-preview: :class:`AgentPoolsOperations` - * 2024-02-01: :class:`AgentPoolsOperations` - * 2024-02-02-preview: :class:`AgentPoolsOperations` - * 2024-03-02-preview: :class:`AgentPoolsOperations` - * 2024-04-02-preview: :class:`AgentPoolsOperations` - * 2024-05-01: :class:`AgentPoolsOperations` - * 2024-05-02-preview: :class:`AgentPoolsOperations` - * 2024-06-02-preview: :class:`AgentPoolsOperations` - * 2024-07-01: :class:`AgentPoolsOperations` - """ - api_version = self._get_api_version('agent_pools') - if api_version == '2019-02-01': - from .v2019_02_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2019-04-01': - from .v2019_04_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2019-06-01': - from .v2019_06_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2019-08-01': - from .v2019_08_01.operations import AgentPoolsOperations as 
OperationClass - elif api_version == '2019-10-01': - from .v2019_10_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2019-11-01': - from .v2019_11_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-01-01': - from .v2020_01_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-02-01': - from .v2020_02_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-03-01': - from .v2020_03_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-04-01': - from .v2020_04_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-06-01': - from .v2020_06_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-07-01': - from .v2020_07_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-09-01': - from .v2020_09_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-11-01': - from .v2020_11_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-12-01': - from .v2020_12_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-02-01': - from .v2021_02_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-03-01': - from .v2021_03_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-05-01': - from .v2021_05_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-07-01': - from .v2021_07_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-08-01': - from .v2021_08_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-09-01': - from .v2021_09_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-10-01': - from 
.v2021_10_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-11-01-preview': - from .v2021_11_01_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-01-01': - from .v2022_01_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-01-02-preview': - from .v2022_01_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-02-01': - from .v2022_02_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-02-02-preview': - from .v2022_02_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-03-01': - from .v2022_03_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from .v2022_03_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-04-01': - from .v2022_04_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-06-01': - from .v2022_06_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-07-01': - from .v2022_07_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import 
AgentPoolsOperations as OperationClass - elif api_version == '2022-09-01': - from .v2022_09_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-11-01': - from .v2022_11_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-01-01': - from .v2023_01_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-02-01': - from .v2023_02_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-03-01': - from .v2023_03_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-04-01': - from .v2023_04_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-05-01': - from .v2023_05_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-06-01': - from .v2023_06_01.operations import AgentPoolsOperations as OperationClass - elif api_version == 
'2023-06-02-preview': - from .v2023_06_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-07-01': - from .v2023_07_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-08-01': - from .v2023_08_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-09-01': - from .v2023_09_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from 
.v2024_04_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import AgentPoolsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'agent_pools'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def container_services(self): - """Instance depends on the API version: - - * 2017-07-01: :class:`ContainerServicesOperations` - * 2019-04-01: :class:`ContainerServicesOperations` - """ - api_version = self._get_api_version('container_services') - if api_version == '2017-07-01': - from .v2017_07_01.operations import ContainerServicesOperations as OperationClass - elif api_version == '2019-04-01': - from .v2019_04_01.operations import ContainerServicesOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'container_services'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def fleet_members(self): - """Instance depends on the API version: - - * 2022-06-02-preview: :class:`FleetMembersOperations` - * 2022-07-02-preview: :class:`FleetMembersOperations` - * 2022-09-02-preview: :class:`FleetMembersOperations` - """ - api_version = self._get_api_version('fleet_members') - if 
api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import FleetMembersOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import FleetMembersOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import FleetMembersOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'fleet_members'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def fleets(self): - """Instance depends on the API version: - - * 2022-06-02-preview: :class:`FleetsOperations` - * 2022-07-02-preview: :class:`FleetsOperations` - * 2022-09-02-preview: :class:`FleetsOperations` - """ - api_version = self._get_api_version('fleets') - if api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import FleetsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import FleetsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import FleetsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'fleets'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def load_balancers(self): - """Instance depends on the API version: - - * 2024-03-02-preview: :class:`LoadBalancersOperations` - * 2024-04-02-preview: :class:`LoadBalancersOperations` - * 2024-05-02-preview: :class:`LoadBalancersOperations` - * 2024-06-02-preview: :class:`LoadBalancersOperations` - """ - api_version = 
self._get_api_version('load_balancers') - if api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import LoadBalancersOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import LoadBalancersOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import LoadBalancersOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import LoadBalancersOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'load_balancers'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def machines(self): - """Instance depends on the API version: - - * 2023-07-02-preview: :class:`MachinesOperations` - * 2023-08-02-preview: :class:`MachinesOperations` - * 2023-09-02-preview: :class:`MachinesOperations` - * 2023-10-02-preview: :class:`MachinesOperations` - * 2023-11-02-preview: :class:`MachinesOperations` - * 2024-01-02-preview: :class:`MachinesOperations` - * 2024-02-02-preview: :class:`MachinesOperations` - * 2024-03-02-preview: :class:`MachinesOperations` - * 2024-04-02-preview: :class:`MachinesOperations` - * 2024-05-02-preview: :class:`MachinesOperations` - * 2024-06-02-preview: :class:`MachinesOperations` - * 2024-07-01: :class:`MachinesOperations` - """ - api_version = self._get_api_version('machines') - if api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import MachinesOperations as 
OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import MachinesOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import MachinesOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'machines'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def maintenance_configurations(self): - """Instance depends on the API version: - - * 2020-12-01: :class:`MaintenanceConfigurationsOperations` - * 2021-02-01: :class:`MaintenanceConfigurationsOperations` - * 2021-03-01: :class:`MaintenanceConfigurationsOperations` - * 2021-05-01: :class:`MaintenanceConfigurationsOperations` - * 2021-07-01: :class:`MaintenanceConfigurationsOperations` - * 2021-08-01: :class:`MaintenanceConfigurationsOperations` - * 2021-09-01: :class:`MaintenanceConfigurationsOperations` - * 2021-10-01: 
:class:`MaintenanceConfigurationsOperations` - * 2021-11-01-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-01-01: :class:`MaintenanceConfigurationsOperations` - * 2022-01-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-02-01: :class:`MaintenanceConfigurationsOperations` - * 2022-02-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-03-01: :class:`MaintenanceConfigurationsOperations` - * 2022-03-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-04-01: :class:`MaintenanceConfigurationsOperations` - * 2022-04-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-05-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-06-01: :class:`MaintenanceConfigurationsOperations` - * 2022-06-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-07-01: :class:`MaintenanceConfigurationsOperations` - * 2022-07-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-08-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-08-03-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-09-01: :class:`MaintenanceConfigurationsOperations` - * 2022-09-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-10-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-11-01: :class:`MaintenanceConfigurationsOperations` - * 2022-11-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-01-01: :class:`MaintenanceConfigurationsOperations` - * 2023-01-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-02-01: :class:`MaintenanceConfigurationsOperations` - * 2023-02-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-03-01: :class:`MaintenanceConfigurationsOperations` - * 2023-03-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-04-01: :class:`MaintenanceConfigurationsOperations` - * 2023-04-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-05-01: 
:class:`MaintenanceConfigurationsOperations` - * 2023-05-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-06-01: :class:`MaintenanceConfigurationsOperations` - * 2023-06-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-07-01: :class:`MaintenanceConfigurationsOperations` - * 2023-07-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-08-01: :class:`MaintenanceConfigurationsOperations` - * 2023-08-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-09-01: :class:`MaintenanceConfigurationsOperations` - * 2023-09-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-10-01: :class:`MaintenanceConfigurationsOperations` - * 2023-10-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-11-01: :class:`MaintenanceConfigurationsOperations` - * 2023-11-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-01-01: :class:`MaintenanceConfigurationsOperations` - * 2024-01-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-02-01: :class:`MaintenanceConfigurationsOperations` - * 2024-02-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-03-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-04-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-05-01: :class:`MaintenanceConfigurationsOperations` - * 2024-05-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-06-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-07-01: :class:`MaintenanceConfigurationsOperations` - """ - api_version = self._get_api_version('maintenance_configurations') - if api_version == '2020-12-01': - from .v2020_12_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-02-01': - from .v2021_02_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-03-01': - from .v2021_03_01.operations import MaintenanceConfigurationsOperations as 
OperationClass - elif api_version == '2021-05-01': - from .v2021_05_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-07-01': - from .v2021_07_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-08-01': - from .v2021_08_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-09-01': - from .v2021_09_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-10-01': - from .v2021_10_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-11-01-preview': - from .v2021_11_01_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-01-01': - from .v2022_01_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-01-02-preview': - from .v2022_01_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-02-01': - from .v2022_02_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-02-02-preview': - from .v2022_02_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-03-01': - from .v2022_03_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from .v2022_03_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-04-01': - from .v2022_04_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import 
MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-06-01': - from .v2022_06_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-07-01': - from .v2022_07_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-09-01': - from .v2022_09_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-11-01': - from .v2022_11_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-01-01': - from .v2023_01_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-02-01': - from .v2023_02_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == 
'2023-02-02-preview': - from .v2023_02_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-03-01': - from .v2023_03_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-04-01': - from .v2023_04_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-05-01': - from .v2023_05_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-06-01': - from .v2023_06_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-07-01': - from .v2023_07_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-08-01': - from .v2023_08_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-09-01': - from .v2023_09_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import 
MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import MaintenanceConfigurationsOperations as OperationClass - else: - raise 
ValueError("API version {} does not have operation group 'maintenance_configurations'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def managed_cluster_snapshots(self): - """Instance depends on the API version: - - * 2022-02-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-03-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-04-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-05-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-06-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-07-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-08-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-08-03-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-09-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-10-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-11-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-01-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-02-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-03-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-04-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-05-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-06-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-07-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-08-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-09-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-10-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-11-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-01-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-02-02-preview: 
:class:`ManagedClusterSnapshotsOperations` - * 2024-03-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-04-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-05-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-06-02-preview: :class:`ManagedClusterSnapshotsOperations` - """ - api_version = self._get_api_version('managed_cluster_snapshots') - if api_version == '2022-02-02-preview': - from .v2022_02_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from .v2022_03_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from 
.v2023_01_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from 
.v2024_04_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import ManagedClusterSnapshotsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'managed_cluster_snapshots'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def managed_clusters(self): - """Instance depends on the API version: - - * 2018-03-31: :class:`ManagedClustersOperations` - * 2018-08-01-preview: :class:`ManagedClustersOperations` - * 2019-02-01: :class:`ManagedClustersOperations` - * 2019-04-01: :class:`ManagedClustersOperations` - * 2019-06-01: :class:`ManagedClustersOperations` - * 2019-08-01: :class:`ManagedClustersOperations` - * 2019-10-01: :class:`ManagedClustersOperations` - * 2019-11-01: :class:`ManagedClustersOperations` - * 2020-01-01: :class:`ManagedClustersOperations` - * 2020-02-01: :class:`ManagedClustersOperations` - * 2020-03-01: :class:`ManagedClustersOperations` - * 2020-04-01: :class:`ManagedClustersOperations` - * 2020-06-01: :class:`ManagedClustersOperations` - * 2020-07-01: :class:`ManagedClustersOperations` - * 2020-09-01: :class:`ManagedClustersOperations` - * 2020-11-01: :class:`ManagedClustersOperations` - * 2020-12-01: :class:`ManagedClustersOperations` - * 2021-02-01: :class:`ManagedClustersOperations` - * 2021-03-01: :class:`ManagedClustersOperations` - * 2021-05-01: :class:`ManagedClustersOperations` - * 2021-07-01: :class:`ManagedClustersOperations` - * 2021-08-01: :class:`ManagedClustersOperations` - * 2021-09-01: :class:`ManagedClustersOperations` - * 2021-10-01: 
:class:`ManagedClustersOperations` - * 2021-11-01-preview: :class:`ManagedClustersOperations` - * 2022-01-01: :class:`ManagedClustersOperations` - * 2022-01-02-preview: :class:`ManagedClustersOperations` - * 2022-02-01: :class:`ManagedClustersOperations` - * 2022-02-02-preview: :class:`ManagedClustersOperations` - * 2022-03-01: :class:`ManagedClustersOperations` - * 2022-03-02-preview: :class:`ManagedClustersOperations` - * 2022-04-01: :class:`ManagedClustersOperations` - * 2022-04-02-preview: :class:`ManagedClustersOperations` - * 2022-05-02-preview: :class:`ManagedClustersOperations` - * 2022-06-01: :class:`ManagedClustersOperations` - * 2022-06-02-preview: :class:`ManagedClustersOperations` - * 2022-07-01: :class:`ManagedClustersOperations` - * 2022-07-02-preview: :class:`ManagedClustersOperations` - * 2022-08-02-preview: :class:`ManagedClustersOperations` - * 2022-08-03-preview: :class:`ManagedClustersOperations` - * 2022-09-01: :class:`ManagedClustersOperations` - * 2022-09-02-preview: :class:`ManagedClustersOperations` - * 2022-10-02-preview: :class:`ManagedClustersOperations` - * 2022-11-01: :class:`ManagedClustersOperations` - * 2022-11-02-preview: :class:`ManagedClustersOperations` - * 2023-01-01: :class:`ManagedClustersOperations` - * 2023-01-02-preview: :class:`ManagedClustersOperations` - * 2023-02-01: :class:`ManagedClustersOperations` - * 2023-02-02-preview: :class:`ManagedClustersOperations` - * 2023-03-01: :class:`ManagedClustersOperations` - * 2023-03-02-preview: :class:`ManagedClustersOperations` - * 2023-04-01: :class:`ManagedClustersOperations` - * 2023-04-02-preview: :class:`ManagedClustersOperations` - * 2023-05-01: :class:`ManagedClustersOperations` - * 2023-05-02-preview: :class:`ManagedClustersOperations` - * 2023-06-01: :class:`ManagedClustersOperations` - * 2023-06-02-preview: :class:`ManagedClustersOperations` - * 2023-07-01: :class:`ManagedClustersOperations` - * 2023-07-02-preview: :class:`ManagedClustersOperations` - * 2023-08-01: 
:class:`ManagedClustersOperations` - * 2023-08-02-preview: :class:`ManagedClustersOperations` - * 2023-09-01: :class:`ManagedClustersOperations` - * 2023-09-02-preview: :class:`ManagedClustersOperations` - * 2023-10-01: :class:`ManagedClustersOperations` - * 2023-10-02-preview: :class:`ManagedClustersOperations` - * 2023-11-01: :class:`ManagedClustersOperations` - * 2023-11-02-preview: :class:`ManagedClustersOperations` - * 2024-01-01: :class:`ManagedClustersOperations` - * 2024-01-02-preview: :class:`ManagedClustersOperations` - * 2024-02-01: :class:`ManagedClustersOperations` - * 2024-02-02-preview: :class:`ManagedClustersOperations` - * 2024-03-02-preview: :class:`ManagedClustersOperations` - * 2024-04-02-preview: :class:`ManagedClustersOperations` - * 2024-05-01: :class:`ManagedClustersOperations` - * 2024-05-02-preview: :class:`ManagedClustersOperations` - * 2024-06-02-preview: :class:`ManagedClustersOperations` - * 2024-07-01: :class:`ManagedClustersOperations` - """ - api_version = self._get_api_version('managed_clusters') - if api_version == '2018-03-31': - from .v2018_03_31.operations import ManagedClustersOperations as OperationClass - elif api_version == '2018-08-01-preview': - from .v2018_08_01_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-02-01': - from .v2019_02_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-04-01': - from .v2019_04_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-06-01': - from .v2019_06_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-08-01': - from .v2019_08_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-10-01': - from .v2019_10_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-11-01': - from .v2019_11_01.operations import ManagedClustersOperations as 
OperationClass - elif api_version == '2020-01-01': - from .v2020_01_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-02-01': - from .v2020_02_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-03-01': - from .v2020_03_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-04-01': - from .v2020_04_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-06-01': - from .v2020_06_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-07-01': - from .v2020_07_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-09-01': - from .v2020_09_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-11-01': - from .v2020_11_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-12-01': - from .v2020_12_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-02-01': - from .v2021_02_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-03-01': - from .v2021_03_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-05-01': - from .v2021_05_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-07-01': - from .v2021_07_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-08-01': - from .v2021_08_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-09-01': - from .v2021_09_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-10-01': - from .v2021_10_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-11-01-preview': - from .v2021_11_01_preview.operations import 
ManagedClustersOperations as OperationClass - elif api_version == '2022-01-01': - from .v2022_01_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-01-02-preview': - from .v2022_01_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-02-01': - from .v2022_02_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-02-02-preview': - from .v2022_02_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-03-01': - from .v2022_03_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-03-02-preview': - from .v2022_03_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-04-01': - from .v2022_04_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-06-01': - from .v2022_06_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-07-01': - from .v2022_07_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-09-01': - 
from .v2022_09_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-11-01': - from .v2022_11_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-01-01': - from .v2023_01_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-02-01': - from .v2023_02_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-03-01': - from .v2023_03_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-04-01': - from .v2023_04_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-05-01': - from .v2023_05_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-06-01': - from .v2023_06_01.operations import ManagedClustersOperations as OperationClass - elif api_version == 
'2023-06-02-preview': - from .v2023_06_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-07-01': - from .v2023_07_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-08-01': - from .v2023_08_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-09-01': - from .v2023_09_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import ManagedClustersOperations as 
OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import ManagedClustersOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'managed_clusters'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def open_shift_managed_clusters(self): - """Instance depends on the API version: - - * 2018-09-30-preview: :class:`OpenShiftManagedClustersOperations` - * 2019-04-30: :class:`OpenShiftManagedClustersOperations` - * 2019-09-30-preview: :class:`OpenShiftManagedClustersOperations` - * 2019-10-27-preview: :class:`OpenShiftManagedClustersOperations` - """ - api_version = self._get_api_version('open_shift_managed_clusters') - if api_version == '2018-09-30-preview': - from .v2018_09_30_preview.operations import OpenShiftManagedClustersOperations as OperationClass - elif api_version == '2019-04-30': - from .v2019_04_30.operations import OpenShiftManagedClustersOperations as OperationClass - elif api_version == '2019-09-30-preview': - from .v2019_09_30_preview.operations import OpenShiftManagedClustersOperations as OperationClass - elif api_version == '2019-10-27-preview': - from .v2019_10_27_preview.operations import OpenShiftManagedClustersOperations as OperationClass - else: - raise ValueError("API version {} 
does not have operation group 'open_shift_managed_clusters'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def operation_status_result(self): - """Instance depends on the API version: - - * 2023-10-02-preview: :class:`OperationStatusResultOperations` - * 2023-11-02-preview: :class:`OperationStatusResultOperations` - * 2024-01-02-preview: :class:`OperationStatusResultOperations` - * 2024-02-02-preview: :class:`OperationStatusResultOperations` - * 2024-03-02-preview: :class:`OperationStatusResultOperations` - * 2024-04-02-preview: :class:`OperationStatusResultOperations` - * 2024-05-02-preview: :class:`OperationStatusResultOperations` - * 2024-06-02-preview: :class:`OperationStatusResultOperations` - """ - api_version = self._get_api_version('operation_status_result') - if api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-06-02-preview': - from 
.v2024_06_02_preview.operations import OperationStatusResultOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'operation_status_result'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def operations(self): - """Instance depends on the API version: - - * 2018-03-31: :class:`Operations` - * 2018-08-01-preview: :class:`Operations` - * 2019-02-01: :class:`Operations` - * 2019-04-01: :class:`Operations` - * 2019-06-01: :class:`Operations` - * 2019-08-01: :class:`Operations` - * 2019-10-01: :class:`Operations` - * 2019-11-01: :class:`Operations` - * 2020-01-01: :class:`Operations` - * 2020-02-01: :class:`Operations` - * 2020-03-01: :class:`Operations` - * 2020-04-01: :class:`Operations` - * 2020-06-01: :class:`Operations` - * 2020-07-01: :class:`Operations` - * 2020-09-01: :class:`Operations` - * 2020-11-01: :class:`Operations` - * 2020-12-01: :class:`Operations` - * 2021-02-01: :class:`Operations` - * 2021-03-01: :class:`Operations` - * 2021-05-01: :class:`Operations` - * 2021-07-01: :class:`Operations` - * 2021-08-01: :class:`Operations` - * 2021-09-01: :class:`Operations` - * 2021-10-01: :class:`Operations` - * 2021-11-01-preview: :class:`Operations` - * 2022-01-01: :class:`Operations` - * 2022-01-02-preview: :class:`Operations` - * 2022-02-01: :class:`Operations` - * 2022-02-02-preview: :class:`Operations` - * 2022-03-01: :class:`Operations` - * 2022-03-02-preview: :class:`Operations` - * 2022-04-01: :class:`Operations` - * 2022-04-02-preview: :class:`Operations` - * 2022-05-02-preview: :class:`Operations` - * 2022-06-01: :class:`Operations` - * 2022-06-02-preview: :class:`Operations` - * 2022-07-01: :class:`Operations` - * 2022-07-02-preview: :class:`Operations` - * 2022-08-02-preview: :class:`Operations` - * 2022-08-03-preview: 
:class:`Operations` - * 2022-09-01: :class:`Operations` - * 2022-09-02-preview: :class:`Operations` - * 2022-10-02-preview: :class:`Operations` - * 2022-11-01: :class:`Operations` - * 2022-11-02-preview: :class:`Operations` - * 2023-01-01: :class:`Operations` - * 2023-01-02-preview: :class:`Operations` - * 2023-02-01: :class:`Operations` - * 2023-02-02-preview: :class:`Operations` - * 2023-03-01: :class:`Operations` - * 2023-03-02-preview: :class:`Operations` - * 2023-04-01: :class:`Operations` - * 2023-04-02-preview: :class:`Operations` - * 2023-05-01: :class:`Operations` - * 2023-05-02-preview: :class:`Operations` - * 2023-06-01: :class:`Operations` - * 2023-06-02-preview: :class:`Operations` - * 2023-07-01: :class:`Operations` - * 2023-07-02-preview: :class:`Operations` - * 2023-08-01: :class:`Operations` - * 2023-08-02-preview: :class:`Operations` - * 2023-09-01: :class:`Operations` - * 2023-09-02-preview: :class:`Operations` - * 2023-10-01: :class:`Operations` - * 2023-10-02-preview: :class:`Operations` - * 2023-11-01: :class:`Operations` - * 2023-11-02-preview: :class:`Operations` - * 2024-01-01: :class:`Operations` - * 2024-01-02-preview: :class:`Operations` - * 2024-02-01: :class:`Operations` - * 2024-02-02-preview: :class:`Operations` - * 2024-03-02-preview: :class:`Operations` - * 2024-04-02-preview: :class:`Operations` - * 2024-05-01: :class:`Operations` - * 2024-05-02-preview: :class:`Operations` - * 2024-06-02-preview: :class:`Operations` - * 2024-07-01: :class:`Operations` - """ - api_version = self._get_api_version('operations') - if api_version == '2018-03-31': - from .v2018_03_31.operations import Operations as OperationClass - elif api_version == '2018-08-01-preview': - from .v2018_08_01_preview.operations import Operations as OperationClass - elif api_version == '2019-02-01': - from .v2019_02_01.operations import Operations as OperationClass - elif api_version == '2019-04-01': - from .v2019_04_01.operations import Operations as OperationClass - 
elif api_version == '2019-06-01': - from .v2019_06_01.operations import Operations as OperationClass - elif api_version == '2019-08-01': - from .v2019_08_01.operations import Operations as OperationClass - elif api_version == '2019-10-01': - from .v2019_10_01.operations import Operations as OperationClass - elif api_version == '2019-11-01': - from .v2019_11_01.operations import Operations as OperationClass - elif api_version == '2020-01-01': - from .v2020_01_01.operations import Operations as OperationClass - elif api_version == '2020-02-01': - from .v2020_02_01.operations import Operations as OperationClass - elif api_version == '2020-03-01': - from .v2020_03_01.operations import Operations as OperationClass - elif api_version == '2020-04-01': - from .v2020_04_01.operations import Operations as OperationClass - elif api_version == '2020-06-01': - from .v2020_06_01.operations import Operations as OperationClass - elif api_version == '2020-07-01': - from .v2020_07_01.operations import Operations as OperationClass - elif api_version == '2020-09-01': - from .v2020_09_01.operations import Operations as OperationClass - elif api_version == '2020-11-01': - from .v2020_11_01.operations import Operations as OperationClass - elif api_version == '2020-12-01': - from .v2020_12_01.operations import Operations as OperationClass - elif api_version == '2021-02-01': - from .v2021_02_01.operations import Operations as OperationClass - elif api_version == '2021-03-01': - from .v2021_03_01.operations import Operations as OperationClass - elif api_version == '2021-05-01': - from .v2021_05_01.operations import Operations as OperationClass - elif api_version == '2021-07-01': - from .v2021_07_01.operations import Operations as OperationClass - elif api_version == '2021-08-01': - from .v2021_08_01.operations import Operations as OperationClass - elif api_version == '2021-09-01': - from .v2021_09_01.operations import Operations as OperationClass - elif api_version == '2021-10-01': - from 
.v2021_10_01.operations import Operations as OperationClass - elif api_version == '2021-11-01-preview': - from .v2021_11_01_preview.operations import Operations as OperationClass - elif api_version == '2022-01-01': - from .v2022_01_01.operations import Operations as OperationClass - elif api_version == '2022-01-02-preview': - from .v2022_01_02_preview.operations import Operations as OperationClass - elif api_version == '2022-02-01': - from .v2022_02_01.operations import Operations as OperationClass - elif api_version == '2022-02-02-preview': - from .v2022_02_02_preview.operations import Operations as OperationClass - elif api_version == '2022-03-01': - from .v2022_03_01.operations import Operations as OperationClass - elif api_version == '2022-03-02-preview': - from .v2022_03_02_preview.operations import Operations as OperationClass - elif api_version == '2022-04-01': - from .v2022_04_01.operations import Operations as OperationClass - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import Operations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import Operations as OperationClass - elif api_version == '2022-06-01': - from .v2022_06_01.operations import Operations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import Operations as OperationClass - elif api_version == '2022-07-01': - from .v2022_07_01.operations import Operations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import Operations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import Operations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import Operations as OperationClass - elif api_version == '2022-09-01': - from .v2022_09_01.operations import Operations as OperationClass - elif api_version == 
'2022-09-02-preview': - from .v2022_09_02_preview.operations import Operations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import Operations as OperationClass - elif api_version == '2022-11-01': - from .v2022_11_01.operations import Operations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import Operations as OperationClass - elif api_version == '2023-01-01': - from .v2023_01_01.operations import Operations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import Operations as OperationClass - elif api_version == '2023-02-01': - from .v2023_02_01.operations import Operations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import Operations as OperationClass - elif api_version == '2023-03-01': - from .v2023_03_01.operations import Operations as OperationClass - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview.operations import Operations as OperationClass - elif api_version == '2023-04-01': - from .v2023_04_01.operations import Operations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import Operations as OperationClass - elif api_version == '2023-05-01': - from .v2023_05_01.operations import Operations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import Operations as OperationClass - elif api_version == '2023-06-01': - from .v2023_06_01.operations import Operations as OperationClass - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview.operations import Operations as OperationClass - elif api_version == '2023-07-01': - from .v2023_07_01.operations import Operations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import Operations as OperationClass - elif 
api_version == '2023-08-01': - from .v2023_08_01.operations import Operations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import Operations as OperationClass - elif api_version == '2023-09-01': - from .v2023_09_01.operations import Operations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import Operations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import Operations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import Operations as OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import Operations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import Operations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import Operations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import Operations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import Operations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import Operations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import Operations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import Operations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import Operations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import Operations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import Operations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import Operations as 
OperationClass - else: - raise ValueError("API version {} does not have operation group 'operations'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def private_endpoint_connections(self): - """Instance depends on the API version: - - * 2020-06-01: :class:`PrivateEndpointConnectionsOperations` - * 2020-07-01: :class:`PrivateEndpointConnectionsOperations` - * 2020-09-01: :class:`PrivateEndpointConnectionsOperations` - * 2020-11-01: :class:`PrivateEndpointConnectionsOperations` - * 2020-12-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-02-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-03-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-05-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-07-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-08-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-09-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-10-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-11-01-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-01-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-01-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-02-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-02-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-03-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-03-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-04-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-04-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-05-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-06-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-06-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-07-01: 
:class:`PrivateEndpointConnectionsOperations` - * 2022-07-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-08-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-08-03-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-09-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-09-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-10-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-11-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-11-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-01-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-01-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-02-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-02-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-03-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-03-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-04-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-04-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-05-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-05-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-06-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-06-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-07-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-07-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-08-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-08-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-09-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-09-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-10-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-10-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-11-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-11-02-preview: 
:class:`PrivateEndpointConnectionsOperations` - * 2024-01-01: :class:`PrivateEndpointConnectionsOperations` - * 2024-01-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-02-01: :class:`PrivateEndpointConnectionsOperations` - * 2024-02-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-03-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-04-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-05-01: :class:`PrivateEndpointConnectionsOperations` - * 2024-05-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-06-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-07-01: :class:`PrivateEndpointConnectionsOperations` - """ - api_version = self._get_api_version('private_endpoint_connections') - if api_version == '2020-06-01': - from .v2020_06_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2020-07-01': - from .v2020_07_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2020-09-01': - from .v2020_09_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2020-11-01': - from .v2020_11_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2020-12-01': - from .v2020_12_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-02-01': - from .v2021_02_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-03-01': - from .v2021_03_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-05-01': - from .v2021_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-07-01': - from .v2021_07_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-08-01': - from 
.v2021_08_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-09-01': - from .v2021_09_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-10-01': - from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-11-01-preview': - from .v2021_11_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-01-01': - from .v2022_01_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-01-02-preview': - from .v2022_01_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-02-01': - from .v2022_02_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-02-02-preview': - from .v2022_02_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-03-01': - from .v2022_03_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from .v2022_03_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-04-01': - from .v2022_04_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-06-01': - from .v2022_06_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import PrivateEndpointConnectionsOperations as 
OperationClass - elif api_version == '2022-07-01': - from .v2022_07_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-09-01': - from .v2022_09_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-11-01': - from .v2022_11_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-01-01': - from .v2023_01_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-02-01': - from .v2023_02_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-03-01': - from .v2023_03_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from 
.v2023_03_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-04-01': - from .v2023_04_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-05-01': - from .v2023_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-06-01': - from .v2023_06_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-07-01': - from .v2023_07_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-08-01': - from .v2023_08_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-09-01': - from .v2023_09_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import PrivateEndpointConnectionsOperations as 
OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import PrivateEndpointConnectionsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - 
- @property - def private_link_resources(self): - """Instance depends on the API version: - - * 2020-09-01: :class:`PrivateLinkResourcesOperations` - * 2020-11-01: :class:`PrivateLinkResourcesOperations` - * 2020-12-01: :class:`PrivateLinkResourcesOperations` - * 2021-02-01: :class:`PrivateLinkResourcesOperations` - * 2021-03-01: :class:`PrivateLinkResourcesOperations` - * 2021-05-01: :class:`PrivateLinkResourcesOperations` - * 2021-07-01: :class:`PrivateLinkResourcesOperations` - * 2021-08-01: :class:`PrivateLinkResourcesOperations` - * 2021-09-01: :class:`PrivateLinkResourcesOperations` - * 2021-10-01: :class:`PrivateLinkResourcesOperations` - * 2021-11-01-preview: :class:`PrivateLinkResourcesOperations` - * 2022-01-01: :class:`PrivateLinkResourcesOperations` - * 2022-01-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-02-01: :class:`PrivateLinkResourcesOperations` - * 2022-02-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-03-01: :class:`PrivateLinkResourcesOperations` - * 2022-03-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-04-01: :class:`PrivateLinkResourcesOperations` - * 2022-04-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-05-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-06-01: :class:`PrivateLinkResourcesOperations` - * 2022-06-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-07-01: :class:`PrivateLinkResourcesOperations` - * 2022-07-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-08-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-08-03-preview: :class:`PrivateLinkResourcesOperations` - * 2022-09-01: :class:`PrivateLinkResourcesOperations` - * 2022-09-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-10-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-11-01: :class:`PrivateLinkResourcesOperations` - * 2022-11-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-01-01: :class:`PrivateLinkResourcesOperations` - * 
2023-01-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-02-01: :class:`PrivateLinkResourcesOperations` - * 2023-02-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-03-01: :class:`PrivateLinkResourcesOperations` - * 2023-03-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-04-01: :class:`PrivateLinkResourcesOperations` - * 2023-04-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-05-01: :class:`PrivateLinkResourcesOperations` - * 2023-05-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-06-01: :class:`PrivateLinkResourcesOperations` - * 2023-06-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-07-01: :class:`PrivateLinkResourcesOperations` - * 2023-07-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-08-01: :class:`PrivateLinkResourcesOperations` - * 2023-08-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-09-01: :class:`PrivateLinkResourcesOperations` - * 2023-09-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-10-01: :class:`PrivateLinkResourcesOperations` - * 2023-10-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-11-01: :class:`PrivateLinkResourcesOperations` - * 2023-11-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-01-01: :class:`PrivateLinkResourcesOperations` - * 2024-01-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-02-01: :class:`PrivateLinkResourcesOperations` - * 2024-02-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-03-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-04-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-05-01: :class:`PrivateLinkResourcesOperations` - * 2024-05-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-06-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-07-01: :class:`PrivateLinkResourcesOperations` - """ - api_version = self._get_api_version('private_link_resources') - if api_version == '2020-09-01': - from 
.v2020_09_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2020-11-01': - from .v2020_11_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2020-12-01': - from .v2020_12_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-02-01': - from .v2021_02_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-03-01': - from .v2021_03_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-05-01': - from .v2021_05_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-07-01': - from .v2021_07_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-08-01': - from .v2021_08_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-09-01': - from .v2021_09_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-10-01': - from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-11-01-preview': - from .v2021_11_01_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-01-01': - from .v2022_01_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-01-02-preview': - from .v2022_01_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-02-01': - from .v2022_02_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-02-02-preview': - from .v2022_02_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-03-01': - from .v2022_03_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == 
'2022-03-02-preview': - from .v2022_03_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-04-01': - from .v2022_04_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-06-01': - from .v2022_06_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-07-01': - from .v2022_07_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-09-01': - from .v2022_09_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-11-01': - from .v2022_11_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif 
api_version == '2023-01-01': - from .v2023_01_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-02-01': - from .v2023_02_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-03-01': - from .v2023_03_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-04-01': - from .v2023_04_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-05-01': - from .v2023_05_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-06-01': - from .v2023_06_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-07-01': - from .v2023_07_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-08-01': - from .v2023_08_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-08-02-preview': - 
from .v2023_08_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-09-01': - from .v2023_09_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-06-02-preview': - from 
.v2024_06_02_preview.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import PrivateLinkResourcesOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def resolve_private_link_service_id(self): - """Instance depends on the API version: - - * 2020-09-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2020-11-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2020-12-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-02-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-03-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-05-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-07-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-08-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-09-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-10-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-11-01-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-01-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-02-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-03-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-04-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-06-01: 
:class:`ResolvePrivateLinkServiceIdOperations` - * 2022-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-07-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-07-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-08-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-08-03-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-09-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-09-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-10-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-11-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-11-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-01-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-02-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-03-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-04-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-05-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-06-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-07-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-07-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-08-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-08-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-09-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-09-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-10-01: 
:class:`ResolvePrivateLinkServiceIdOperations` - * 2023-10-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-11-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-11-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-01-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-02-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-05-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-07-01: :class:`ResolvePrivateLinkServiceIdOperations` - """ - api_version = self._get_api_version('resolve_private_link_service_id') - if api_version == '2020-09-01': - from .v2020_09_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2020-11-01': - from .v2020_11_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2020-12-01': - from .v2020_12_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-02-01': - from .v2021_02_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-03-01': - from .v2021_03_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-05-01': - from .v2021_05_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-07-01': - from .v2021_07_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-08-01': - from .v2021_08_01.operations import 
ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-09-01': - from .v2021_09_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-10-01': - from .v2021_10_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-11-01-preview': - from .v2021_11_01_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-01-01': - from .v2022_01_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-01-02-preview': - from .v2022_01_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-02-01': - from .v2022_02_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-02-02-preview': - from .v2022_02_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-03-01': - from .v2022_03_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-03-02-preview': - from .v2022_03_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-04-01': - from .v2022_04_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-06-01': - from .v2022_06_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif 
api_version == '2022-07-01': - from .v2022_07_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-09-01': - from .v2022_09_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-11-01': - from .v2022_11_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-01-01': - from .v2023_01_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-02-01': - from .v2023_02_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-03-01': - from .v2023_03_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-03-02-preview': - from 
.v2023_03_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-04-01': - from .v2023_04_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-05-01': - from .v2023_05_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-06-01': - from .v2023_06_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-07-01': - from .v2023_07_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-08-01': - from .v2023_08_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-09-01': - from .v2023_09_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import 
ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'resolve_private_link_service_id'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version)), api_version) - - @property - def snapshots(self): - """Instance depends on the API version: + def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. - * 2021-08-01: :class:`SnapshotsOperations` - * 2021-09-01: :class:`SnapshotsOperations` - * 2021-10-01: :class:`SnapshotsOperations` - * 2021-11-01-preview: :class:`SnapshotsOperations` - * 2022-01-01: :class:`SnapshotsOperations` - * 2022-01-02-preview: :class:`SnapshotsOperations` - * 2022-02-01: :class:`SnapshotsOperations` - * 2022-02-02-preview: :class:`SnapshotsOperations` - * 2022-03-01: :class:`SnapshotsOperations` - * 2022-03-02-preview: :class:`SnapshotsOperations` - * 2022-04-01: :class:`SnapshotsOperations` - * 2022-04-02-preview: :class:`SnapshotsOperations` - * 2022-05-02-preview: :class:`SnapshotsOperations` - * 2022-06-01: :class:`SnapshotsOperations` - * 2022-06-02-preview: :class:`SnapshotsOperations` - * 2022-07-01: :class:`SnapshotsOperations` - * 2022-07-02-preview: :class:`SnapshotsOperations` - * 2022-08-02-preview: :class:`SnapshotsOperations` - * 2022-08-03-preview: :class:`SnapshotsOperations` - * 2022-09-01: :class:`SnapshotsOperations` - * 2022-09-02-preview: :class:`SnapshotsOperations` - * 2022-10-02-preview: :class:`SnapshotsOperations` - * 2022-11-01: :class:`SnapshotsOperations` - * 2022-11-02-preview: :class:`SnapshotsOperations` - * 2023-01-01: :class:`SnapshotsOperations` - * 2023-01-02-preview: :class:`SnapshotsOperations` - * 2023-02-01: :class:`SnapshotsOperations` - * 2023-02-02-preview: :class:`SnapshotsOperations` - * 2023-03-01: :class:`SnapshotsOperations` - * 2023-03-02-preview: :class:`SnapshotsOperations` - * 2023-04-01: :class:`SnapshotsOperations` - * 2023-04-02-preview: :class:`SnapshotsOperations` - * 2023-05-01: :class:`SnapshotsOperations` - * 2023-05-02-preview: :class:`SnapshotsOperations` - * 
2023-06-01: :class:`SnapshotsOperations` - * 2023-06-02-preview: :class:`SnapshotsOperations` - * 2023-07-01: :class:`SnapshotsOperations` - * 2023-07-02-preview: :class:`SnapshotsOperations` - * 2023-08-01: :class:`SnapshotsOperations` - * 2023-08-02-preview: :class:`SnapshotsOperations` - * 2023-09-01: :class:`SnapshotsOperations` - * 2023-09-02-preview: :class:`SnapshotsOperations` - * 2023-10-01: :class:`SnapshotsOperations` - * 2023-10-02-preview: :class:`SnapshotsOperations` - * 2023-11-01: :class:`SnapshotsOperations` - * 2023-11-02-preview: :class:`SnapshotsOperations` - * 2024-01-01: :class:`SnapshotsOperations` - * 2024-01-02-preview: :class:`SnapshotsOperations` - * 2024-02-01: :class:`SnapshotsOperations` - * 2024-02-02-preview: :class:`SnapshotsOperations` - * 2024-03-02-preview: :class:`SnapshotsOperations` - * 2024-04-02-preview: :class:`SnapshotsOperations` - * 2024-05-01: :class:`SnapshotsOperations` - * 2024-05-02-preview: :class:`SnapshotsOperations` - * 2024-06-02-preview: :class:`SnapshotsOperations` - * 2024-07-01: :class:`SnapshotsOperations` - """ - api_version = self._get_api_version('snapshots') - if api_version == '2021-08-01': - from .v2021_08_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2021-09-01': - from .v2021_09_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2021-10-01': - from .v2021_10_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2021-11-01-preview': - from .v2021_11_01_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-01-01': - from .v2022_01_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-01-02-preview': - from .v2022_01_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-02-01': - from .v2022_02_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-02-02-preview': - 
from .v2022_02_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-03-01': - from .v2022_03_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from .v2022_03_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-04-01': - from .v2022_04_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-06-01': - from .v2022_06_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-07-01': - from .v2022_07_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-09-01': - from .v2022_09_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-11-01': - from .v2022_11_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import 
SnapshotsOperations as OperationClass - elif api_version == '2023-01-01': - from .v2023_01_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-02-01': - from .v2023_02_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-03-01': - from .v2023_03_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-04-01': - from .v2023_04_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-05-01': - from .v2023_05_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-06-01': - from .v2023_06_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-07-01': - from .v2023_07_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-08-01': - from .v2023_08_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-09-01': - 
from .v2023_09_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import SnapshotsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 
'snapshots'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client._send_request(request) + - @property - def trusted_access_role_bindings(self): - """Instance depends on the API version: + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - * 2022-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-07-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-08-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-08-03-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-09-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-10-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-11-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-01-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-02-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-03-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-07-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-08-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-09-01: :class:`TrustedAccessRoleBindingsOperations` - * 2023-09-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-10-01: :class:`TrustedAccessRoleBindingsOperations` - * 2023-10-02-preview: 
:class:`TrustedAccessRoleBindingsOperations` - * 2023-11-01: :class:`TrustedAccessRoleBindingsOperations` - * 2023-11-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-01-01: :class:`TrustedAccessRoleBindingsOperations` - * 2024-01-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-02-01: :class:`TrustedAccessRoleBindingsOperations` - * 2024-02-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-03-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-05-01: :class:`TrustedAccessRoleBindingsOperations` - * 2024-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-07-01: :class:`TrustedAccessRoleBindingsOperations` + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse """ - api_version = self._get_api_version('trusted_access_role_bindings') - if api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif 
api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-09-01': - from .v2023_09_01.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from 
.v2024_03_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import TrustedAccessRoleBindingsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'trusted_access_role_bindings'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - @property - def trusted_access_roles(self): - """Instance depends on the API version: + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - * 2022-04-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-05-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-06-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-07-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-08-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-08-03-preview: :class:`TrustedAccessRolesOperations` - * 2022-09-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-10-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-11-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-01-02-preview: :class:`TrustedAccessRolesOperations` - 
* 2023-02-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-03-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-04-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-05-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-06-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-07-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-08-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-09-01: :class:`TrustedAccessRolesOperations` - * 2023-09-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-10-01: :class:`TrustedAccessRolesOperations` - * 2023-10-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-11-01: :class:`TrustedAccessRolesOperations` - * 2023-11-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-01-01: :class:`TrustedAccessRolesOperations` - * 2024-01-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-02-01: :class:`TrustedAccessRolesOperations` - * 2024-02-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-03-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-04-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-05-01: :class:`TrustedAccessRolesOperations` - * 2024-05-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-06-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-07-01: :class:`TrustedAccessRolesOperations` - """ - api_version = self._get_api_version('trusted_access_roles') - if api_version == '2022-04-02-preview': - from .v2022_04_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-05-02-preview': - from .v2022_05_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-06-02-preview': - from .v2022_06_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-07-02-preview': - from .v2022_07_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif 
api_version == '2022-08-02-preview': - from .v2022_08_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-08-03-preview': - from .v2022_08_03_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-09-02-preview': - from .v2022_09_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-10-02-preview': - from .v2022_10_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-11-02-preview': - from .v2022_11_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-01-02-preview': - from .v2023_01_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-02-02-preview': - from .v2023_02_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-03-02-preview': - from .v2023_03_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-04-02-preview': - from .v2023_04_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-05-02-preview': - from .v2023_05_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-06-02-preview': - from .v2023_06_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-07-02-preview': - from .v2023_07_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-08-02-preview': - from .v2023_08_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-09-01': - from .v2023_09_01.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-09-02-preview': - from .v2023_09_02_preview.operations import 
TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-10-01': - from .v2023_10_01.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-10-02-preview': - from .v2023_10_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-11-01': - from .v2023_11_01.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-11-02-preview': - from .v2023_11_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-01-01': - from .v2024_01_01.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-01-02-preview': - from .v2024_01_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-02-01': - from .v2024_02_01.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-02-02-preview': - from .v2024_02_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-03-02-preview': - from .v2024_03_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-04-02-preview': - from .v2024_04_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-05-01': - from .v2024_05_01.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-05-02-preview': - from .v2024_05_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-06-02-preview': - from .v2024_06_02_preview.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-07-01': - from .v2024_07_01.operations import TrustedAccessRolesOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'trusted_access_roles'".format(api_version)) - 
self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - def close(self): + def close(self) -> None: self._client.close() - def __enter__(self): + + def __enter__(self) -> Self: self._client.__enter__() return self - def __exit__(self, *exc_details): + + def __exit__(self, *exc_details: Any) -> None: self._client.__exit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_patch.py similarity index 61% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_patch.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_patch.py index f7dd3251033..8bcb627aa47 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_patch.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_patch.py @@ -1,7 +1,8 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_version.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_utils/__init__.py similarity index 92% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_version.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_utils/__init__.py index cf831534071..0af9b28f660 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_version.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_utils/__init__.py @@ -1,9 +1,6 @@ -# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- - -VERSION = "32.0.0" diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_serialization.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_utils/serialization.py similarity index 81% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_serialization.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_utils/serialization.py index 59f1fcf71bc..ff543ed937f 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_serialization.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_utils/serialization.py @@ -1,30 +1,12 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 # -------------------------------------------------------------------------- -# # Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -# pylint: skip-file # pyright: reportUnnecessaryTypeIgnoreComment=false from base64 import b64decode, b64encode @@ -39,7 +21,6 @@ import sys import codecs from typing import ( - Dict, Any, cast, Optional, @@ -48,11 +29,7 @@ IO, Mapping, Callable, - TypeVar, MutableMapping, - Type, - List, - Mapping, ) try: @@ -62,13 +39,13 @@ import xml.etree.ElementTree as ET import isodate # type: ignore +from typing_extensions import Self from azure.core.exceptions import DeserializationError, SerializationError from azure.core.serialization import NULL as CoreNull _BOM = codecs.BOM_UTF8.decode(encoding="utf-8") -ModelType = TypeVar("ModelType", bound="Model") JSON = MutableMapping[str, Any] @@ -91,6 +68,8 @@ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: :param data: Input, could be bytes or stream (will be decoded with UTF8) or text :type data: str or bytes or IO :param str content_type: The content type. + :return: The deserialized data. 
+ :rtype: object """ if hasattr(data, "read"): # Assume a stream @@ -112,7 +91,7 @@ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: try: return json.loads(data_as_str) except ValueError as err: - raise DeserializationError("JSON is invalid: {}".format(err), err) + raise DeserializationError("JSON is invalid: {}".format(err), err) from err elif "xml" in (content_type or []): try: @@ -155,6 +134,11 @@ def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], Use bytes and headers to NOT use any requests/aiohttp or whatever specific implementation. Headers will tested for "content-type" + + :param bytes body_bytes: The body of the response. + :param dict headers: The headers of the response. + :returns: The deserialized data. + :rtype: object """ # Try to use content-type from headers if available content_type = None @@ -179,80 +163,31 @@ def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], except NameError: _long_type = int - -class UTC(datetime.tzinfo): - """Time Zone info for handling UTC""" - - def utcoffset(self, dt): - """UTF offset for UTC is 0.""" - return datetime.timedelta(0) - - def tzname(self, dt): - """Timestamp representation.""" - return "Z" - - def dst(self, dt): - """No daylight saving for UTC.""" - return datetime.timedelta(hours=1) - - -try: - from datetime import timezone as _FixedOffset # type: ignore -except ImportError: # Python 2.7 - - class _FixedOffset(datetime.tzinfo): # type: ignore - """Fixed offset in minutes east from UTC. 
- Copy/pasted from Python doc - :param datetime.timedelta offset: offset in timedelta format - """ - - def __init__(self, offset): - self.__offset = offset - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return str(self.__offset.total_seconds() / 3600) - - def __repr__(self): - return "".format(self.tzname(None)) - - def dst(self, dt): - return datetime.timedelta(0) - - def __getinitargs__(self): - return (self.__offset,) - - -try: - from datetime import timezone - - TZ_UTC = timezone.utc -except ImportError: - TZ_UTC = UTC() # type: ignore +TZ_UTC = datetime.timezone.utc _FLATTEN = re.compile(r"(? None: - self.additional_properties: Optional[Dict[str, Any]] = {} - for k in kwargs: + self.additional_properties: Optional[dict[str, Any]] = {} + for k in kwargs: # pylint: disable=consider-using-dict-items if k not in self._attribute_map: _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) elif k in self._validation and self._validation[k].get("readonly", False): @@ -300,13 +242,23 @@ def __init__(self, **kwargs: Any) -> None: setattr(self, k, kwargs[k]) def __eq__(self, other: Any) -> bool: - """Compare objects by comparing all attributes.""" + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are equal + :rtype: bool + """ if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ return False def __ne__(self, other: Any) -> bool: - """Compare objects by comparing all attributes.""" + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are not equal + :rtype: bool + """ return not self.__eq__(other) def __str__(self) -> str: @@ -326,7 +278,11 @@ def is_xml_model(cls) -> bool: @classmethod def _create_xml_node(cls): - """Create XML node.""" + """Create XML node. 
+ + :returns: The XML node + :rtype: xml.etree.ElementTree.Element + """ try: xml_map = cls._xml_map # type: ignore except AttributeError: @@ -346,14 +302,14 @@ def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: :rtype: dict """ serializer = Serializer(self._infer_class_models()) - return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs) # type: ignore + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, keep_readonly=keep_readonly, **kwargs + ) def as_dict( self, keep_readonly: bool = True, - key_transformer: Callable[ - [str, Dict[str, Any], Any], Any - ] = attribute_transformer, + key_transformer: Callable[[str, dict[str, Any], Any], Any] = attribute_transformer, **kwargs: Any ) -> JSON: """Return a dict that can be serialized using json.dump. @@ -382,12 +338,15 @@ def my_key_transformer(key, attr_desc, value): If you want XML serialization, you can pass the kwargs is_xml=True. + :param bool keep_readonly: If you want to serialize the readonly attributes :param function key_transformer: A key transformer function. :returns: A dict JSON compatible object :rtype: dict """ serializer = Serializer(self._infer_class_models()) - return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs) # type: ignore + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) @classmethod def _infer_class_models(cls): @@ -397,30 +356,31 @@ def _infer_class_models(cls): client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} if cls.__name__ not in client_models: raise ValueError("Not Autorest generated code") - except Exception: + except Exception: # pylint: disable=broad-exception-caught # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. 
client_models = {cls.__name__: cls} return client_models @classmethod - def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: """Parse a str using the RestAPI syntax and return a model. :param str data: A str using RestAPI structure. JSON by default. :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong + :rtype: Self """ deserializer = Deserializer(cls._infer_class_models()) return deserializer(cls.__name__, data, content_type=content_type) # type: ignore @classmethod def from_dict( - cls: Type[ModelType], + cls, data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + key_extractors: Optional[Callable[[str, dict[str, Any], Any], Any]] = None, content_type: Optional[str] = None, - ) -> ModelType: + ) -> Self: """Parse a dict using given key extractor return a model. By default consider key @@ -428,9 +388,11 @@ def from_dict( and last_rest_key_case_insensitive_extractor) :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. :param str content_type: JSON by default, set application/xml if XML. 
:returns: An instance of this model - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong + :rtype: Self """ deserializer = Deserializer(cls._infer_class_models()) deserializer.key_extractors = ( # type: ignore @@ -450,21 +412,25 @@ def _flatten_subtype(cls, key, objects): return {} result = dict(cls._subtype_map[key]) for valuetype in cls._subtype_map[key].values(): - result.update(objects[valuetype]._flatten_subtype(key, objects)) + result |= objects[valuetype]._flatten_subtype(key, objects) # pylint: disable=protected-access return result @classmethod def _classify(cls, response, objects): """Check the class _subtype_map for any child classes. We want to ignore any inherited _subtype_maps. - Remove the polymorphic key from the initial data. + + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class """ for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): subtype_value = None if not isinstance(response, ET.Element): rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] - subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) else: subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) if subtype_value: @@ -503,11 +469,13 @@ def _decode_attribute_map_key(key): inside the received data. 
:param str key: A key string from the generated code + :returns: The decoded key + :rtype: str """ return key.replace("\\.", ".") -class Serializer(object): +class Serializer: # pylint: disable=too-many-public-methods """Request object model serializer.""" basic_types = {str: "str", int: "int", bool: "bool", float: "float"} @@ -542,7 +510,7 @@ class Serializer(object): "multiple": lambda x, y: x % y != 0, } - def __init__(self, classes: Optional[Mapping[str, type]]=None): + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: self.serialize_type = { "iso-8601": Serializer.serialize_iso, "rfc-1123": Serializer.serialize_rfc, @@ -558,17 +526,20 @@ def __init__(self, classes: Optional[Mapping[str, type]]=None): "[]": self.serialize_iter, "{}": self.serialize_dict, } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.dependencies: dict[str, type] = dict(classes) if classes else {} self.key_transformer = full_restapi_key_transformer self.client_side_validation = True - def _serialize(self, target_obj, data_type=None, **kwargs): + def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, target_obj, data_type=None, **kwargs + ): """Serialize data into a string according to type. - :param target_obj: The data to be serialized. + :param object target_obj: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, dict - :raises: SerializationError if serialization fails. + :raises SerializationError: if serialization fails. + :returns: The serialized data. 
""" key_transformer = kwargs.get("key_transformer", self.key_transformer) keep_readonly = kwargs.get("keep_readonly", False) @@ -594,17 +565,19 @@ def _serialize(self, target_obj, data_type=None, **kwargs): serialized = {} if is_xml_model_serialization: - serialized = target_obj._create_xml_node() + serialized = target_obj._create_xml_node() # pylint: disable=protected-access try: - attributes = target_obj._attribute_map + attributes = target_obj._attribute_map # pylint: disable=protected-access for attr, attr_desc in attributes.items(): attr_name = attr - if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False): + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): continue if attr_name == "additional_properties" and attr_desc["key"] == "": if target_obj.additional_properties is not None: - serialized.update(target_obj.additional_properties) + serialized |= target_obj.additional_properties continue try: @@ -635,7 +608,8 @@ def _serialize(self, target_obj, data_type=None, **kwargs): if isinstance(new_attr, list): serialized.extend(new_attr) # type: ignore elif isinstance(new_attr, ET.Element): - # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. if "name" not in getattr(orig_attr, "_xml_map", {}): splitted_tag = new_attr.tag.split("}") if len(splitted_tag) == 2: # Namespace @@ -666,17 +640,17 @@ def _serialize(self, target_obj, data_type=None, **kwargs): except (AttributeError, KeyError, TypeError) as err: msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) raise SerializationError(msg) from err - else: - return serialized + return serialized def body(self, data, data_type, **kwargs): """Serialize data intended for a request body. 
- :param data: The data to be serialized. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: dict - :raises: SerializationError if serialization fails. - :raises: ValueError if data is None + :raises SerializationError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized request body """ # Just in case this is a dict @@ -705,7 +679,7 @@ def body(self, data, data_type, **kwargs): attribute_key_case_insensitive_extractor, last_rest_key_case_insensitive_extractor, ] - data = deserializer._deserialize(data_type, data) + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access except DeserializationError as err: raise SerializationError("Unable to build a model: " + str(err)) from err @@ -714,11 +688,13 @@ def body(self, data, data_type, **kwargs): def url(self, name, data, data_type, **kwargs): """Serialize data intended for a URL path. - :param data: The data to be serialized. + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :returns: The serialized URL path + :raises TypeError: if serialization fails. + :raises ValueError: if data is None """ try: output = self.serialize_data(data, data_type, **kwargs) @@ -730,27 +706,26 @@ def url(self, name, data, data_type, **kwargs): output = output.replace("{", quote("{")).replace("}", quote("}")) else: output = quote(str(output), safe="") - except SerializationError: - raise TypeError("{} must be type {}.".format(name, data_type)) - else: - return output + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output def query(self, name, data, data_type, **kwargs): """Serialize data intended for a URL query. 
- :param data: The data to be serialized. + :param str name: The name of the query parameter. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. - :keyword bool skip_quote: Whether to skip quote the serialized result. - Defaults to False. :rtype: str, list - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized query parameter """ try: # Treat the list aside, since we don't want to encode the div separator if data_type.startswith("["): internal_data_type = data_type[1:-1] - do_quote = not kwargs.get('skip_quote', False) + do_quote = not kwargs.get("skip_quote", False) return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) # Not a list, regular serialization @@ -761,19 +736,20 @@ def query(self, name, data, data_type, **kwargs): output = str(output) else: output = quote(str(output), safe="") - except SerializationError: - raise TypeError("{} must be type {}.".format(name, data_type)) - else: - return str(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) def header(self, name, data, data_type, **kwargs): """Serialize data intended for a request header. - :param data: The data to be serialized. + :param str name: The name of the header. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None + :returns: The serialized header """ try: if data_type in ["[str]"]: @@ -782,21 +758,20 @@ def header(self, name, data, data_type, **kwargs): output = self.serialize_data(data, data_type, **kwargs) if data_type == "bool": output = json.dumps(output) - except SerializationError: - raise TypeError("{} must be type {}.".format(name, data_type)) - else: - return str(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) def serialize_data(self, data, data_type, **kwargs): """Serialize generic data according to supplied data type. - :param data: The data to be serialized. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. - :param bool required: Whether it's essential that the data not be - empty or None - :raises: AttributeError if required data is None. - :raises: ValueError if data is None - :raises: SerializationError if serialization fails. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. + :returns: The serialized data. 
+ :rtype: str, int, float, bool, dict, list """ if data is None: raise ValueError("No value for given attribute") @@ -807,12 +782,12 @@ def serialize_data(self, data, data_type, **kwargs): if data_type in self.basic_types.values(): return self.serialize_basic(data, data_type, **kwargs) - elif data_type in self.serialize_type: + if data_type in self.serialize_type: return self.serialize_type[data_type](data, **kwargs) # If dependencies is empty, try with current data class # It has to be a subclass of Enum anyway - enum_type = self.dependencies.get(data_type, data.__class__) + enum_type = self.dependencies.get(data_type, cast(type, data.__class__)) if issubclass(enum_type, Enum): return Serializer.serialize_enum(data, enum_obj=enum_type) @@ -823,11 +798,10 @@ def serialize_data(self, data, data_type, **kwargs): except (ValueError, TypeError) as err: msg = "Unable to serialize value: {!r} as type: {!r}." raise SerializationError(msg.format(data, data_type)) from err - else: - return self._serialize(data, **kwargs) + return self._serialize(data, **kwargs) @classmethod - def _get_custom_serializers(cls, data_type, **kwargs): + def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) if custom_serializer: return custom_serializer @@ -843,23 +817,26 @@ def serialize_basic(cls, data, data_type, **kwargs): - basic_types_serializers dict[str, callable] : If set, use the callable as serializer - is_xml bool : If set, use xml_basic_types_serializers - :param data: Object to be serialized. + :param obj data: Object to be serialized. :param str data_type: Type of object in the iterable. 
+ :rtype: str, int, float, bool + :return: serialized object """ custom_serializer = cls._get_custom_serializers(data_type, **kwargs) if custom_serializer: return custom_serializer(data) if data_type == "str": return cls.serialize_unicode(data) - return eval(data_type)(data) # nosec + return eval(data_type)(data) # nosec # pylint: disable=eval-used @classmethod def serialize_unicode(cls, data): """Special handling for serializing unicode strings in Py2. Encode to UTF-8 if unicode, otherwise handle as a str. - :param data: Object to be serialized. + :param str data: Object to be serialized. :rtype: str + :return: serialized object """ try: # If I received an enum, return its value return data.value @@ -873,8 +850,7 @@ def serialize_unicode(cls, data): return data except NameError: return str(data) - else: - return str(data) + return str(data) def serialize_iter(self, data, iter_type, div=None, **kwargs): """Serialize iterable. @@ -884,15 +860,13 @@ def serialize_iter(self, data, iter_type, div=None, **kwargs): serialization_ctxt['type'] should be same as data_type. - is_xml bool : If set, serialize as XML - :param list attr: Object to be serialized. + :param list data: Object to be serialized. :param str iter_type: Type of object in the iterable. - :param bool required: Whether the objects in the iterable must - not be None or empty. :param str div: If set, this str will be used to combine the elements in the iterable into a combined string. Default is 'None'. - :keyword bool do_quote: Whether to quote the serialized result of each iterable element. Defaults to False. 
:rtype: list, str + :return: serialized iterable """ if isinstance(data, str): raise SerializationError("Refuse str type as a valid iter type.") @@ -909,12 +883,8 @@ def serialize_iter(self, data, iter_type, div=None, **kwargs): raise serialized.append(None) - if kwargs.get('do_quote', False): - serialized = [ - '' if s is None else quote(str(s), safe='') - for s - in serialized - ] + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] if div: serialized = ["" if s is None else str(s) for s in serialized] @@ -951,9 +921,8 @@ def serialize_dict(self, attr, dict_type, **kwargs): :param dict attr: Object to be serialized. :param str dict_type: Type of object in the dictionary. - :param bool required: Whether the objects in the dictionary must - not be None or empty. :rtype: dict + :return: serialized dictionary """ serialization_ctxt = kwargs.get("serialization_ctxt", {}) serialized = {} @@ -977,7 +946,7 @@ def serialize_dict(self, attr, dict_type, **kwargs): return serialized - def serialize_object(self, attr, **kwargs): + def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements """Serialize a generic object. This will be handled as a dictionary. If object passed in is not a basic type (str, int, float, dict, list) it will simply be @@ -985,6 +954,7 @@ def serialize_object(self, attr, **kwargs): :param dict attr: Object to be serialized. 
:rtype: dict or str + :return: serialized object """ if attr is None: return None @@ -1009,7 +979,7 @@ def serialize_object(self, attr, **kwargs): return self.serialize_decimal(attr) # If it's a model or I know this dependency, serialize as a Model - elif obj_type in self.dependencies.values() or isinstance(attr, Model): + if obj_type in self.dependencies.values() or isinstance(attr, Model): return self._serialize(attr) if obj_type == dict: @@ -1040,56 +1010,61 @@ def serialize_enum(attr, enum_obj=None): try: enum_obj(result) # type: ignore return result - except ValueError: + except ValueError as exc: for enum_value in enum_obj: # type: ignore if enum_value.value.lower() == str(attr).lower(): return enum_value.value error = "{!r} is not valid value for enum {!r}" - raise SerializationError(error.format(attr, enum_obj)) + raise SerializationError(error.format(attr, enum_obj)) from exc @staticmethod - def serialize_bytearray(attr, **kwargs): + def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument """Serialize bytearray into base-64 string. - :param attr: Object to be serialized. + :param str attr: Object to be serialized. :rtype: str + :return: serialized base64 """ return b64encode(attr).decode() @staticmethod - def serialize_base64(attr, **kwargs): + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument """Serialize str into base-64 string. - :param attr: Object to be serialized. + :param str attr: Object to be serialized. :rtype: str + :return: serialized base64 """ encoded = b64encode(attr).decode("ascii") return encoded.strip("=").replace("+", "-").replace("/", "_") @staticmethod - def serialize_decimal(attr, **kwargs): + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument """Serialize Decimal object to float. - :param attr: Object to be serialized. + :param decimal attr: Object to be serialized. 
:rtype: float + :return: serialized decimal """ return float(attr) @staticmethod - def serialize_long(attr, **kwargs): + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument """Serialize long (Py2) or int (Py3). - :param attr: Object to be serialized. + :param int attr: Object to be serialized. :rtype: int/long + :return: serialized long """ return _long_type(attr) @staticmethod - def serialize_date(attr, **kwargs): + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument """Serialize Date object into ISO-8601 formatted string. :param Date attr: Object to be serialized. :rtype: str + :return: serialized date """ if isinstance(attr, str): attr = isodate.parse_date(attr) @@ -1097,11 +1072,12 @@ def serialize_date(attr, **kwargs): return t @staticmethod - def serialize_time(attr, **kwargs): + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument """Serialize Time object into ISO-8601 formatted string. :param datetime.time attr: Object to be serialized. :rtype: str + :return: serialized time """ if isinstance(attr, str): attr = isodate.parse_time(attr) @@ -1111,30 +1087,32 @@ def serialize_time(attr, **kwargs): return t @staticmethod - def serialize_duration(attr, **kwargs): + def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument """Serialize TimeDelta object into ISO-8601 formatted string. :param TimeDelta attr: Object to be serialized. :rtype: str + :return: serialized duration """ if isinstance(attr, str): attr = isodate.parse_duration(attr) return isodate.duration_isoformat(attr) @staticmethod - def serialize_rfc(attr, **kwargs): + def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument """Serialize Datetime object into RFC-1123 formatted string. :param Datetime attr: Object to be serialized. :rtype: str - :raises: TypeError if format invalid. + :raises TypeError: if format invalid. 
+ :return: serialized rfc """ try: if not attr.tzinfo: _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") utc = attr.utctimetuple() - except AttributeError: - raise TypeError("RFC1123 object must be valid Datetime object.") + except AttributeError as exc: + raise TypeError("RFC1123 object must be valid Datetime object.") from exc return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( Serializer.days[utc.tm_wday], @@ -1147,12 +1125,13 @@ def serialize_rfc(attr, **kwargs): ) @staticmethod - def serialize_iso(attr, **kwargs): + def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument """Serialize Datetime object into ISO-8601 formatted string. :param Datetime attr: Object to be serialized. :rtype: str - :raises: SerializationError if format invalid. + :raises SerializationError: if format invalid. + :return: serialized iso """ if isinstance(attr, str): attr = isodate.parse_datetime(attr) @@ -1178,13 +1157,14 @@ def serialize_iso(attr, **kwargs): raise TypeError(msg) from err @staticmethod - def serialize_unix(attr, **kwargs): + def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument """Serialize Datetime object into IntTime format. This is represented as seconds. :param Datetime attr: Object to be serialized. 
:rtype: int - :raises: SerializationError if format invalid + :raises SerializationError: if format invalid + :return: serialied unix """ if isinstance(attr, int): return attr @@ -1192,17 +1172,17 @@ def serialize_unix(attr, **kwargs): if not attr.tzinfo: _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") return int(calendar.timegm(attr.utctimetuple())) - except AttributeError: - raise TypeError("Unix time object must be valid Datetime object.") + except AttributeError as exc: + raise TypeError("Unix time object must be valid Datetime object.") from exc -def rest_key_extractor(attr, attr_desc, data): +def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument key = attr_desc["key"] working_data = data while "." in key: # Need the cast, as for some reasons "split" is typed as list[str | Any] - dict_keys = cast(List[str], _FLATTEN.split(key)) + dict_keys = cast(list[str], _FLATTEN.split(key)) if len(dict_keys) == 1: key = _decode_attribute_map_key(dict_keys[0]) break @@ -1217,7 +1197,9 @@ def rest_key_extractor(attr, attr_desc, data): return working_data.get(key) -def rest_key_case_insensitive_extractor(attr, attr_desc, data): +def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements + attr, attr_desc, data +): key = attr_desc["key"] working_data = data @@ -1238,17 +1220,29 @@ def rest_key_case_insensitive_extractor(attr, attr_desc, data): return attribute_key_case_insensitive_extractor(key, None, working_data) -def last_rest_key_extractor(attr, attr_desc, data): - """Extract the attribute in "data" based on the last part of the JSON path key.""" +def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. 
+ + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ key = attr_desc["key"] dict_keys = _FLATTEN.split(key) return attribute_key_extractor(dict_keys[-1], None, data) -def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument """Extract the attribute in "data" based on the last part of the JSON path key. This is the case insensitive version of "last_rest_key_extractor" + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute """ key = attr_desc["key"] dict_keys = _FLATTEN.split(key) @@ -1285,7 +1279,7 @@ def _extract_name_from_internal_type(internal_type): return xml_name -def xml_key_extractor(attr, attr_desc, data): +def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements if isinstance(data, dict): return None @@ -1337,22 +1331,21 @@ def xml_key_extractor(attr, attr_desc, data): if is_iter_type: if is_wrapped: return None # is_wrapped no node, we want None - else: - return [] # not wrapped, assume empty list + return [] # not wrapped, assume empty list return None # Assume it's not there, maybe an optional node. # If is_iter_type and not wrapped, return all found children if is_iter_type: if not is_wrapped: return children - else: # Iter and wrapped, should have found one node only (the wrap one) - if len(children) != 1: - raise DeserializationError( - "Tried to deserialize an array not wrapped, and found several nodes '{}'. 
Maybe you should declare this array as wrapped?".format( - xml_name - ) + # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name ) - return list(children[0]) # Might be empty list and that's ok. + ) + return list(children[0]) # Might be empty list and that's ok. # Here it's not a itertype, we should have found one element only or empty if len(children) > 1: @@ -1360,7 +1353,7 @@ def xml_key_extractor(attr, attr_desc, data): return children[0] -class Deserializer(object): +class Deserializer: """Response object model deserializer. :param dict classes: Class type dictionary for deserializing complex types. @@ -1369,9 +1362,9 @@ class Deserializer(object): basic_types = {str: "str", int: "int", bool: "bool", float: "float"} - valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") - def __init__(self, classes: Optional[Mapping[str, type]]=None): + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: self.deserialize_type = { "iso-8601": Deserializer.deserialize_iso, "rfc-1123": Deserializer.deserialize_rfc, @@ -1391,7 +1384,7 @@ def __init__(self, classes: Optional[Mapping[str, type]]=None): "duration": (isodate.Duration, datetime.timedelta), "iso-8601": (datetime.datetime), } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.dependencies: dict[str, type] = dict(classes) if classes else {} self.key_extractors = [rest_key_extractor, xml_key_extractor] # Additional properties only works if the "rest_key_extractor" is used to # extract the keys. 
Making it to work whatever the key extractor is too much @@ -1407,27 +1400,29 @@ def __call__(self, target_obj, response_data, content_type=None): :param str target_obj: Target data type to deserialize to. :param requests.Response response_data: REST response object. :param str content_type: Swagger "produces" if available. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. + :rtype: object """ data = self._unpack_content(response_data, content_type) return self._deserialize(target_obj, data) - def _deserialize(self, target_obj, data): + def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements """Call the deserializer on a model. Data needs to be already deserialized as JSON or XML ElementTree :param str target_obj: Target data type to deserialize to. :param object data: Object to deserialize. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. 
+ :rtype: object """ # This is already a model, go recursive just in case if hasattr(data, "_attribute_map"): constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] try: - for attr, mapconfig in data._attribute_map.items(): + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access if attr in constants: continue value = getattr(data, attr) @@ -1446,13 +1441,13 @@ def _deserialize(self, target_obj, data): if isinstance(response, str): return self.deserialize_data(data, response) - elif isinstance(response, type) and issubclass(response, Enum): + if isinstance(response, type) and issubclass(response, Enum): return self.deserialize_enum(data, response) if data is None or data is CoreNull: return data try: - attributes = response._attribute_map # type: ignore + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access d_attrs = {} for attr, attr_desc in attributes.items(): # Check empty string. If it's not empty, someone has a real "additionalProperties"... @@ -1482,9 +1477,8 @@ def _deserialize(self, target_obj, data): except (AttributeError, TypeError, KeyError) as err: msg = "Unable to deserialize to object: " + class_name # type: ignore raise DeserializationError(msg) from err - else: - additional_properties = self._build_additional_properties(attributes, data) - return self._instantiate_model(response, d_attrs, additional_properties) + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) def _build_additional_properties(self, attribute_map, data): if not self.additional_properties_detection: @@ -1511,6 +1505,8 @@ def _classify_target(self, target, data): :param str target: The target object type to deserialize to. :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. 
+ :rtype: tuple """ if target is None: return None, None @@ -1522,7 +1518,7 @@ def _classify_target(self, target, data): return target, target try: - target = target._classify(data, self.dependencies) # type: ignore + target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access except AttributeError: pass # Target is not a Model, no classify return target, target.__class__.__name__ # type: ignore @@ -1537,10 +1533,12 @@ def failsafe_deserialize(self, target_obj, data, content_type=None): :param str target_obj: The target object type to deserialize to. :param str/dict data: The response data to deserialize. :param str content_type: Swagger "produces" if available. + :return: Deserialized object. + :rtype: object """ try: return self(target_obj, data, content_type=content_type) - except: + except: # pylint: disable=bare-except _LOGGER.debug( "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True ) @@ -1558,10 +1556,12 @@ def _unpack_content(raw_data, content_type=None): If raw_data is something else, bypass all logic and return it directly. - :param raw_data: Data to be processed. - :param content_type: How to parse if raw_data is a string/bytes. + :param obj raw_data: Data to be processed. + :param str content_type: How to parse if raw_data is a string/bytes. :raises JSONDecodeError: If JSON is requested and parsing is impossible. :raises UnicodeDecodeError: If bytes is not UTF8 + :rtype: object + :return: Unpacked content. """ # Assume this is enough to detect a Pipeline Response without importing it context = getattr(raw_data, "context", {}) @@ -1585,24 +1585,35 @@ def _unpack_content(raw_data, content_type=None): def _instantiate_model(self, response, attrs, additional_properties=None): """Instantiate a response model passing in deserialized args. - :param response: The response model class. - :param d_attrs: The deserialized response attributes. 
+ :param Response response: The response model class. + :param dict attrs: The deserialized response attributes. + :param dict additional_properties: Additional properties to be set. + :rtype: Response + :return: The instantiated response model. """ if callable(response): subtype = getattr(response, "_subtype_map", {}) try: - readonly = [k for k, v in response._validation.items() if v.get("readonly")] - const = [k for k, v in response._validation.items() if v.get("constant")] + readonly = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("readonly") + ] + const = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("constant") + ] kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} response_obj = response(**kwargs) for attr in readonly: setattr(response_obj, attr, attrs.get(attr)) if additional_properties: - response_obj.additional_properties = additional_properties + response_obj.additional_properties = additional_properties # type: ignore return response_obj except TypeError as err: msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore - raise DeserializationError(msg + str(err)) + raise DeserializationError(msg + str(err)) from err else: try: for attr, value in attrs.items(): @@ -1611,15 +1622,16 @@ def _instantiate_model(self, response, attrs, additional_properties=None): except Exception as exp: msg = "Unable to populate response model. " msg += "Type: {}, Error: {}".format(type(response), exp) - raise DeserializationError(msg) + raise DeserializationError(msg) from exp - def deserialize_data(self, data, data_type): + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements """Process data for deserialization according to data type. :param str data: The response string to be deserialized. :param str data_type: The type to deserialize to. 
- :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. + :rtype: object """ if data is None: return data @@ -1633,7 +1645,11 @@ def deserialize_data(self, data, data_type): if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): return data - is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] + is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment + "object", + "[]", + r"{}", + ] if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: return None data_val = self.deserialize_type[data_type](data) @@ -1653,14 +1669,14 @@ def deserialize_data(self, data, data_type): msg = "Unable to deserialize response data." msg += " Data: {}, {}".format(data, data_type) raise DeserializationError(msg) from err - else: - return self._deserialize(obj_type, data) + return self._deserialize(obj_type, data) def deserialize_iter(self, attr, iter_type): """Deserialize an iterable. :param list attr: Iterable to be deserialized. :param str iter_type: The type of object in the iterable. + :return: Deserialized iterable. :rtype: list """ if attr is None: @@ -1677,6 +1693,7 @@ def deserialize_dict(self, attr, dict_type): :param dict/list attr: Dictionary to be deserialized. Also accepts a list of key, value pairs. :param str dict_type: The object type of the items in the dictionary. + :return: Deserialized dictionary. :rtype: dict """ if isinstance(attr, list): @@ -1687,13 +1704,14 @@ def deserialize_dict(self, attr, dict_type): attr = {el.tag: el.text for el in attr} return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} - def deserialize_object(self, attr, **kwargs): + def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements """Deserialize a generic object. This will be handled as a dictionary. 
:param dict attr: Dictionary to be deserialized. + :return: Deserialized object. :rtype: dict - :raises: TypeError if non-builtin datatype encountered. + :raises TypeError: if non-builtin datatype encountered. """ if attr is None: return None @@ -1726,11 +1744,10 @@ def deserialize_object(self, attr, **kwargs): pass return deserialized - else: - error = "Cannot deserialize generic object with type: " - raise TypeError(error + str(obj_type)) + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) - def deserialize_basic(self, attr, data_type): + def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements """Deserialize basic builtin data type from string. Will attempt to convert to str, int, float and bool. This function will also accept '1', '0', 'true' and 'false' as @@ -1738,8 +1755,9 @@ def deserialize_basic(self, attr, data_type): :param str attr: response string to be deserialized. :param str data_type: deserialization data type. + :return: Deserialized basic type. :rtype: str, int, float or bool - :raises: TypeError if string format is not valid. + :raises TypeError: if string format is not valid. """ # If we're here, data is supposed to be a basic type. # If it's still an XML node, take the text @@ -1749,24 +1767,23 @@ def deserialize_basic(self, attr, data_type): if data_type == "str": # None or '', node is empty string. return "" - else: - # None or '', node with a strong type is None. - # Don't try to model "empty bool" or "empty int" - return None + # None or '', node with a strong type is None. 
+ # Don't try to model "empty bool" or "empty int" + return None if data_type == "bool": if attr in [True, False, 1, 0]: return bool(attr) - elif isinstance(attr, str): + if isinstance(attr, str): if attr.lower() in ["true", "1"]: return True - elif attr.lower() in ["false", "0"]: + if attr.lower() in ["false", "0"]: return False raise TypeError("Invalid boolean value: {}".format(attr)) if data_type == "str": return self.deserialize_unicode(attr) - return eval(data_type)(attr) # nosec + return eval(data_type)(attr) # nosec # pylint: disable=eval-used @staticmethod def deserialize_unicode(data): @@ -1774,6 +1791,7 @@ def deserialize_unicode(data): as a string. :param str data: response string to be deserialized. + :return: Deserialized string. :rtype: str or unicode """ # We might be here because we have an enum modeled as string, @@ -1787,8 +1805,7 @@ def deserialize_unicode(data): return data except NameError: return str(data) - else: - return str(data) + return str(data) @staticmethod def deserialize_enum(data, enum_obj): @@ -1800,6 +1817,7 @@ def deserialize_enum(data, enum_obj): :param str data: Response string to be deserialized. If this value is None or invalid it will be returned as-is. :param Enum enum_obj: Enum object to deserialize to. + :return: Deserialized enum object. :rtype: Enum """ if isinstance(data, enum_obj) or data is None: @@ -1810,9 +1828,9 @@ def deserialize_enum(data, enum_obj): # Workaround. We might consider remove it in the future. try: return list(enum_obj.__members__.values())[data] - except IndexError: + except IndexError as exc: error = "{!r} is not a valid index for enum {!r}" - raise DeserializationError(error.format(data, enum_obj)) + raise DeserializationError(error.format(data, enum_obj)) from exc try: return enum_obj(str(data)) except ValueError: @@ -1828,8 +1846,9 @@ def deserialize_bytearray(attr): """Deserialize string into bytearray. :param str attr: response string to be deserialized. 
+ :return: Deserialized bytearray :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1840,8 +1859,9 @@ def deserialize_base64(attr): """Deserialize base64 encoded string into string. :param str attr: response string to be deserialized. + :return: Deserialized base64 string :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1855,8 +1875,9 @@ def deserialize_decimal(attr): """Deserialize string into Decimal object. :param str attr: response string to be deserialized. - :rtype: Decimal - :raises: DeserializationError if string format invalid. + :return: Deserialized decimal + :raises DeserializationError: if string format invalid. + :rtype: decimal """ if isinstance(attr, ET.Element): attr = attr.text @@ -1871,8 +1892,9 @@ def deserialize_long(attr): """Deserialize string into long (Py2) or int (Py3). :param str attr: response string to be deserialized. + :return: Deserialized int :rtype: long or int - :raises: ValueError if string format invalid. + :raises ValueError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1883,8 +1905,9 @@ def deserialize_duration(attr): """Deserialize ISO-8601 formatted string into TimeDelta object. :param str attr: response string to be deserialized. + :return: Deserialized duration :rtype: TimeDelta - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1893,16 +1916,16 @@ def deserialize_duration(attr): except (ValueError, OverflowError, AttributeError) as err: msg = "Cannot deserialize duration object." 
raise DeserializationError(msg) from err - else: - return duration + return duration @staticmethod def deserialize_date(attr): """Deserialize ISO-8601 formatted string into Date object. :param str attr: response string to be deserialized. + :return: Deserialized date :rtype: Date - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1916,8 +1939,9 @@ def deserialize_time(attr): """Deserialize ISO-8601 formatted string into time object. :param str attr: response string to be deserialized. + :return: Deserialized time :rtype: datetime.time - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1930,31 +1954,32 @@ def deserialize_rfc(attr): """Deserialize RFC-1123 formatted string into Datetime object. :param str attr: response string to be deserialized. + :return: Deserialized RFC datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text try: parsed_date = email.utils.parsedate_tz(attr) # type: ignore date_obj = datetime.datetime( - *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) ) if not date_obj.tzinfo: date_obj = date_obj.astimezone(tz=TZ_UTC) except ValueError as err: msg = "Cannot deserialize to rfc datetime object." raise DeserializationError(msg) from err - else: - return date_obj + return date_obj @staticmethod def deserialize_iso(attr): """Deserialize ISO-8601 formatted string into Datetime object. :param str attr: response string to be deserialized. 
+ :return: Deserialized ISO datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1982,8 +2007,7 @@ def deserialize_iso(attr): except (ValueError, OverflowError, AttributeError) as err: msg = "Cannot deserialize datetime object." raise DeserializationError(msg) from err - else: - return date_obj + return date_obj @staticmethod def deserialize_unix(attr): @@ -1991,8 +2015,9 @@ def deserialize_unix(attr): This is represented as seconds. :param int attr: Object to be serialized. + :return: Deserialized datetime :rtype: Datetime - :raises: DeserializationError if format invalid + :raises DeserializationError: if format invalid """ if isinstance(attr, ET.Element): attr = int(attr.text) # type: ignore @@ -2002,5 +2027,4 @@ def deserialize_unix(attr): except ValueError as err: msg = "Cannot deserialize to unix datetime object." raise DeserializationError(msg) from err - else: - return date_obj + return date_obj diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_version.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_version.py index 92050be7ebd..0c7b9c7ab52 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_version.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/_version.py @@ -5,4 +5,4 @@ # license information. 
# -------------------------------------------------------------------------- -VERSION = "32.0.0" +VERSION = "40.2.0" diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/__init__.py index 4ad2bb20096..a55668168f0 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/__init__.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/__init__.py @@ -5,6 +5,25 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position -from ._container_service_client import ContainerServiceClient -__all__ = ['ContainerServiceClient'] +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._container_service_client import ContainerServiceClient # type: ignore + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ContainerServiceClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_configuration.py index 5cb904176b5..f67b2db6f29 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_configuration.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_configuration.py @@ -1,14 +1,12 @@ # coding=utf-8 # 
-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, TYPE_CHECKING + +from typing import Any, Optional, TYPE_CHECKING from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy @@ -16,10 +14,11 @@ from .._version import VERSION if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports + from azure.core import AzureClouds from azure.core.credentials_async import AsyncTokenCredential -class ContainerServiceClientConfiguration: + +class ContainerServiceClientConfiguration: # pylint: disable=too-many-instance-attributes """Configuration for ContainerServiceClient. Note that all parameters used to create this instance are saved as instance @@ -29,14 +28,23 @@ class ContainerServiceClientConfiguration: :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. :type subscription_id: str + :param cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is + None. + :type cloud_setting: ~azure.core.AzureClouds + :keyword api_version: Api Version. Default value is "2025-10-01". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str """ def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, + cloud_setting: Optional["AzureClouds"] = None, **kwargs: Any ) -> None: + api_version: str = kwargs.pop("api_version", "2025-10-01") + if credential is None: raise ValueError("Parameter 'credential' must not be None.") if subscription_id is None: @@ -44,23 +52,24 @@ def __init__( self.credential = credential self.subscription_id = subscription_id - self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) - kwargs.setdefault('sdk_moniker', 'azure-mgmt-containerservice/{}'.format(VERSION)) + self.cloud_setting = cloud_setting + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or 
policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") if self.credential and not self.authentication_policy: - self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs) + self.authentication_policy = AsyncARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_container_service_client.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_container_service_client.py index 5d89a224cab..40bf44c3044 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_container_service_client.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_container_service_client.py @@ -1,89 +1,117 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. 
# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Optional, TYPE_CHECKING +from copy import deepcopy +from typing import Any, Awaitable, Optional, TYPE_CHECKING, cast from typing_extensions import Self from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.settings import settings from azure.mgmt.core import AsyncARMPipelineClient from azure.mgmt.core.policies import AsyncARMAutoResourceProviderRegistrationPolicy -from azure.profiles import KnownProfiles, ProfileDefinition -from azure.profiles.multiapiclient import MultiApiClientMixin +from azure.mgmt.core.tools import get_arm_endpoints -from .._serialization import Deserializer, Serializer +from .. import models as _models +from .._utils.serialization import Deserializer, Serializer from ._configuration import ContainerServiceClientConfiguration +from .operations import ( + AgentPoolsOperations, + MachinesOperations, + MaintenanceConfigurationsOperations, + ManagedClustersOperations, + ManagedNamespacesOperations, + Operations, + PrivateEndpointConnectionsOperations, + PrivateLinkResourcesOperations, + ResolvePrivateLinkServiceIdOperations, + SnapshotsOperations, + TrustedAccessRoleBindingsOperations, + TrustedAccessRolesOperations, +) if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports + from azure.core import AzureClouds from azure.core.credentials_async import AsyncTokenCredential -class _SDKClient(object): - def __init__(self, *args, **kwargs): - """This is a fake class to support current implemetation of MultiApiClientMixin." 
- Will be removed in final version of multiapi azure-core based client - """ - pass -class ContainerServiceClient(MultiApiClientMixin, _SDKClient): +class ContainerServiceClient: # pylint: disable=too-many-instance-attributes """The Container Service Client. - This ready contains multiple API versions, to help you deal with all of the Azure clouds - (Azure Stack, Azure Government, Azure China, etc.). - By default, it uses the latest API version available on public Azure. - For production, you should stick to a particular api-version and/or profile. - The profile sets a mapping between an operation group and its API version. - The api-version parameter sets the default API version if the operation - group is not described in the profile. - + :ivar operations: Operations operations + :vartype operations: azure.mgmt.containerservice.aio.operations.Operations + :ivar managed_clusters: ManagedClustersOperations operations + :vartype managed_clusters: azure.mgmt.containerservice.aio.operations.ManagedClustersOperations + :ivar maintenance_configurations: MaintenanceConfigurationsOperations operations + :vartype maintenance_configurations: + azure.mgmt.containerservice.aio.operations.MaintenanceConfigurationsOperations + :ivar managed_namespaces: ManagedNamespacesOperations operations + :vartype managed_namespaces: + azure.mgmt.containerservice.aio.operations.ManagedNamespacesOperations + :ivar agent_pools: AgentPoolsOperations operations + :vartype agent_pools: azure.mgmt.containerservice.aio.operations.AgentPoolsOperations + :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations + :vartype private_endpoint_connections: + azure.mgmt.containerservice.aio.operations.PrivateEndpointConnectionsOperations + :ivar private_link_resources: PrivateLinkResourcesOperations operations + :vartype private_link_resources: + azure.mgmt.containerservice.aio.operations.PrivateLinkResourcesOperations + :ivar resolve_private_link_service_id: 
ResolvePrivateLinkServiceIdOperations operations + :vartype resolve_private_link_service_id: + azure.mgmt.containerservice.aio.operations.ResolvePrivateLinkServiceIdOperations + :ivar snapshots: SnapshotsOperations operations + :vartype snapshots: azure.mgmt.containerservice.aio.operations.SnapshotsOperations + :ivar trusted_access_role_bindings: TrustedAccessRoleBindingsOperations operations + :vartype trusted_access_role_bindings: + azure.mgmt.containerservice.aio.operations.TrustedAccessRoleBindingsOperations + :ivar trusted_access_roles: TrustedAccessRolesOperations operations + :vartype trusted_access_roles: + azure.mgmt.containerservice.aio.operations.TrustedAccessRolesOperations + :ivar machines: MachinesOperations operations + :vartype machines: azure.mgmt.containerservice.aio.operations.MachinesOperations :param credential: Credential needed for the client to connect to Azure. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. :type subscription_id: str - :param api_version: API version to use if no profile is provided, or if missing in profile. - :type api_version: str - :param base_url: Service URL + :param base_url: Service URL. Default value is None. :type base_url: str - :param profile: A profile definition, from KnownProfiles to dict. - :type profile: azure.profiles.KnownProfiles - :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :keyword cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is + None. + :paramtype cloud_setting: ~azure.core.AzureClouds + :keyword api_version: Api Version. Default value is "2025-10-01". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. """ - DEFAULT_API_VERSION = '2024-07-01' - _PROFILE_TAG = "azure.mgmt.containerservice.ContainerServiceClient" - LATEST_PROFILE = ProfileDefinition({ - _PROFILE_TAG: { - None: DEFAULT_API_VERSION, - 'container_services': '2019-04-01', - 'fleet_members': '2022-09-02-preview', - 'fleets': '2022-09-02-preview', - 'load_balancers': '2024-06-02-preview', - 'managed_cluster_snapshots': '2024-06-02-preview', - 'open_shift_managed_clusters': '2019-04-30', - 'operation_status_result': '2024-06-02-preview', - }}, - _PROFILE_TAG + " latest" - ) - def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, - api_version: Optional[str] = None, - base_url: str = "https://management.azure.com", - profile: KnownProfiles = KnownProfiles.default, + base_url: Optional[str] = None, + *, + cloud_setting: Optional["AzureClouds"] = None, **kwargs: Any ) -> None: - if api_version: - kwargs.setdefault('api_version', api_version) - self._config = ContainerServiceClientConfiguration(credential, subscription_id, **kwargs) + _cloud = cloud_setting or settings.current.azure_cloud # type: ignore + _endpoints = get_arm_endpoints(_cloud) + if not base_url: + base_url = _endpoints["resource_manager"] + credential_scopes = kwargs.pop("credential_scopes", _endpoints["credential_scopes"]) + self._config = ContainerServiceClientConfiguration( + credential=credential, + subscription_id=subscription_id, + cloud_setting=cloud_setting, + credential_scopes=credential_scopes, + **kwargs + ) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -102,2554 +130,73 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, self._config.http_logging_policy, ] - self._client = AsyncARMPipelineClient(base_url=base_url, policies=_policies, **kwargs) - 
super(ContainerServiceClient, self).__init__( - api_version=api_version, - profile=profile + self._client: AsyncARMPipelineClient = AsyncARMPipelineClient( + base_url=cast(str, base_url), policies=_policies, **kwargs ) - @classmethod - def _models_dict(cls, api_version): - return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} - - @classmethod - def models(cls, api_version=DEFAULT_API_VERSION): - """Module depends on the API version: - - * 2017-07-01: :mod:`v2017_07_01.models` - * 2018-03-31: :mod:`v2018_03_31.models` - * 2018-08-01-preview: :mod:`v2018_08_01_preview.models` - * 2018-09-30-preview: :mod:`v2018_09_30_preview.models` - * 2019-02-01: :mod:`v2019_02_01.models` - * 2019-04-01: :mod:`v2019_04_01.models` - * 2019-04-30: :mod:`v2019_04_30.models` - * 2019-06-01: :mod:`v2019_06_01.models` - * 2019-08-01: :mod:`v2019_08_01.models` - * 2019-09-30-preview: :mod:`v2019_09_30_preview.models` - * 2019-10-01: :mod:`v2019_10_01.models` - * 2019-10-27-preview: :mod:`v2019_10_27_preview.models` - * 2019-11-01: :mod:`v2019_11_01.models` - * 2020-01-01: :mod:`v2020_01_01.models` - * 2020-02-01: :mod:`v2020_02_01.models` - * 2020-03-01: :mod:`v2020_03_01.models` - * 2020-04-01: :mod:`v2020_04_01.models` - * 2020-06-01: :mod:`v2020_06_01.models` - * 2020-07-01: :mod:`v2020_07_01.models` - * 2020-09-01: :mod:`v2020_09_01.models` - * 2020-11-01: :mod:`v2020_11_01.models` - * 2020-12-01: :mod:`v2020_12_01.models` - * 2021-02-01: :mod:`v2021_02_01.models` - * 2021-03-01: :mod:`v2021_03_01.models` - * 2021-05-01: :mod:`v2021_05_01.models` - * 2021-07-01: :mod:`v2021_07_01.models` - * 2021-08-01: :mod:`v2021_08_01.models` - * 2021-09-01: :mod:`v2021_09_01.models` - * 2021-10-01: :mod:`v2021_10_01.models` - * 2021-11-01-preview: :mod:`v2021_11_01_preview.models` - * 2022-01-01: :mod:`v2022_01_01.models` - * 2022-01-02-preview: :mod:`v2022_01_02_preview.models` - * 2022-02-01: :mod:`v2022_02_01.models` - * 2022-02-02-preview: 
:mod:`v2022_02_02_preview.models` - * 2022-03-01: :mod:`v2022_03_01.models` - * 2022-03-02-preview: :mod:`v2022_03_02_preview.models` - * 2022-04-01: :mod:`v2022_04_01.models` - * 2022-04-02-preview: :mod:`v2022_04_02_preview.models` - * 2022-05-02-preview: :mod:`v2022_05_02_preview.models` - * 2022-06-01: :mod:`v2022_06_01.models` - * 2022-06-02-preview: :mod:`v2022_06_02_preview.models` - * 2022-07-01: :mod:`v2022_07_01.models` - * 2022-07-02-preview: :mod:`v2022_07_02_preview.models` - * 2022-08-02-preview: :mod:`v2022_08_02_preview.models` - * 2022-08-03-preview: :mod:`v2022_08_03_preview.models` - * 2022-09-01: :mod:`v2022_09_01.models` - * 2022-09-02-preview: :mod:`v2022_09_02_preview.models` - * 2022-10-02-preview: :mod:`v2022_10_02_preview.models` - * 2022-11-01: :mod:`v2022_11_01.models` - * 2022-11-02-preview: :mod:`v2022_11_02_preview.models` - * 2023-01-01: :mod:`v2023_01_01.models` - * 2023-01-02-preview: :mod:`v2023_01_02_preview.models` - * 2023-02-01: :mod:`v2023_02_01.models` - * 2023-02-02-preview: :mod:`v2023_02_02_preview.models` - * 2023-03-01: :mod:`v2023_03_01.models` - * 2023-03-02-preview: :mod:`v2023_03_02_preview.models` - * 2023-04-01: :mod:`v2023_04_01.models` - * 2023-04-02-preview: :mod:`v2023_04_02_preview.models` - * 2023-05-01: :mod:`v2023_05_01.models` - * 2023-05-02-preview: :mod:`v2023_05_02_preview.models` - * 2023-06-01: :mod:`v2023_06_01.models` - * 2023-06-02-preview: :mod:`v2023_06_02_preview.models` - * 2023-07-01: :mod:`v2023_07_01.models` - * 2023-07-02-preview: :mod:`v2023_07_02_preview.models` - * 2023-08-01: :mod:`v2023_08_01.models` - * 2023-08-02-preview: :mod:`v2023_08_02_preview.models` - * 2023-09-01: :mod:`v2023_09_01.models` - * 2023-09-02-preview: :mod:`v2023_09_02_preview.models` - * 2023-10-01: :mod:`v2023_10_01.models` - * 2023-10-02-preview: :mod:`v2023_10_02_preview.models` - * 2023-11-01: :mod:`v2023_11_01.models` - * 2023-11-02-preview: :mod:`v2023_11_02_preview.models` - * 2024-01-01: 
:mod:`v2024_01_01.models` - * 2024-01-02-preview: :mod:`v2024_01_02_preview.models` - * 2024-02-01: :mod:`v2024_02_01.models` - * 2024-02-02-preview: :mod:`v2024_02_02_preview.models` - * 2024-03-02-preview: :mod:`v2024_03_02_preview.models` - * 2024-04-02-preview: :mod:`v2024_04_02_preview.models` - * 2024-05-01: :mod:`v2024_05_01.models` - * 2024-05-02-preview: :mod:`v2024_05_02_preview.models` - * 2024-06-02-preview: :mod:`v2024_06_02_preview.models` - * 2024-07-01: :mod:`v2024_07_01.models` - """ - if api_version == '2017-07-01': - from ..v2017_07_01 import models - return models - elif api_version == '2018-03-31': - from ..v2018_03_31 import models - return models - elif api_version == '2018-08-01-preview': - from ..v2018_08_01_preview import models - return models - elif api_version == '2018-09-30-preview': - from ..v2018_09_30_preview import models - return models - elif api_version == '2019-02-01': - from ..v2019_02_01 import models - return models - elif api_version == '2019-04-01': - from ..v2019_04_01 import models - return models - elif api_version == '2019-04-30': - from ..v2019_04_30 import models - return models - elif api_version == '2019-06-01': - from ..v2019_06_01 import models - return models - elif api_version == '2019-08-01': - from ..v2019_08_01 import models - return models - elif api_version == '2019-09-30-preview': - from ..v2019_09_30_preview import models - return models - elif api_version == '2019-10-01': - from ..v2019_10_01 import models - return models - elif api_version == '2019-10-27-preview': - from ..v2019_10_27_preview import models - return models - elif api_version == '2019-11-01': - from ..v2019_11_01 import models - return models - elif api_version == '2020-01-01': - from ..v2020_01_01 import models - return models - elif api_version == '2020-02-01': - from ..v2020_02_01 import models - return models - elif api_version == '2020-03-01': - from ..v2020_03_01 import models - return models - elif api_version == '2020-04-01': - 
from ..v2020_04_01 import models - return models - elif api_version == '2020-06-01': - from ..v2020_06_01 import models - return models - elif api_version == '2020-07-01': - from ..v2020_07_01 import models - return models - elif api_version == '2020-09-01': - from ..v2020_09_01 import models - return models - elif api_version == '2020-11-01': - from ..v2020_11_01 import models - return models - elif api_version == '2020-12-01': - from ..v2020_12_01 import models - return models - elif api_version == '2021-02-01': - from ..v2021_02_01 import models - return models - elif api_version == '2021-03-01': - from ..v2021_03_01 import models - return models - elif api_version == '2021-05-01': - from ..v2021_05_01 import models - return models - elif api_version == '2021-07-01': - from ..v2021_07_01 import models - return models - elif api_version == '2021-08-01': - from ..v2021_08_01 import models - return models - elif api_version == '2021-09-01': - from ..v2021_09_01 import models - return models - elif api_version == '2021-10-01': - from ..v2021_10_01 import models - return models - elif api_version == '2021-11-01-preview': - from ..v2021_11_01_preview import models - return models - elif api_version == '2022-01-01': - from ..v2022_01_01 import models - return models - elif api_version == '2022-01-02-preview': - from ..v2022_01_02_preview import models - return models - elif api_version == '2022-02-01': - from ..v2022_02_01 import models - return models - elif api_version == '2022-02-02-preview': - from ..v2022_02_02_preview import models - return models - elif api_version == '2022-03-01': - from ..v2022_03_01 import models - return models - elif api_version == '2022-03-02-preview': - from ..v2022_03_02_preview import models - return models - elif api_version == '2022-04-01': - from ..v2022_04_01 import models - return models - elif api_version == '2022-04-02-preview': - from ..v2022_04_02_preview import models - return models - elif api_version == '2022-05-02-preview': 
- from ..v2022_05_02_preview import models - return models - elif api_version == '2022-06-01': - from ..v2022_06_01 import models - return models - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview import models - return models - elif api_version == '2022-07-01': - from ..v2022_07_01 import models - return models - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview import models - return models - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview import models - return models - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview import models - return models - elif api_version == '2022-09-01': - from ..v2022_09_01 import models - return models - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview import models - return models - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview import models - return models - elif api_version == '2022-11-01': - from ..v2022_11_01 import models - return models - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview import models - return models - elif api_version == '2023-01-01': - from ..v2023_01_01 import models - return models - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview import models - return models - elif api_version == '2023-02-01': - from ..v2023_02_01 import models - return models - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview import models - return models - elif api_version == '2023-03-01': - from ..v2023_03_01 import models - return models - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview import models - return models - elif api_version == '2023-04-01': - from ..v2023_04_01 import models - return models - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview import models - return models - elif api_version == '2023-05-01': - from ..v2023_05_01 import models - return models - elif api_version == 
'2023-05-02-preview': - from ..v2023_05_02_preview import models - return models - elif api_version == '2023-06-01': - from ..v2023_06_01 import models - return models - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview import models - return models - elif api_version == '2023-07-01': - from ..v2023_07_01 import models - return models - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview import models - return models - elif api_version == '2023-08-01': - from ..v2023_08_01 import models - return models - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview import models - return models - elif api_version == '2023-09-01': - from ..v2023_09_01 import models - return models - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview import models - return models - elif api_version == '2023-10-01': - from ..v2023_10_01 import models - return models - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview import models - return models - elif api_version == '2023-11-01': - from ..v2023_11_01 import models - return models - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview import models - return models - elif api_version == '2024-01-01': - from ..v2024_01_01 import models - return models - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview import models - return models - elif api_version == '2024-02-01': - from ..v2024_02_01 import models - return models - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview import models - return models - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview import models - return models - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview import models - return models - elif api_version == '2024-05-01': - from ..v2024_05_01 import models - return models - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview import models - return models - 
elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview import models - return models - elif api_version == '2024-07-01': - from ..v2024_07_01 import models - return models - raise ValueError("API version {} is not available".format(api_version)) - - @property - def agent_pools(self): - """Instance depends on the API version: - - * 2019-02-01: :class:`AgentPoolsOperations` - * 2019-04-01: :class:`AgentPoolsOperations` - * 2019-06-01: :class:`AgentPoolsOperations` - * 2019-08-01: :class:`AgentPoolsOperations` - * 2019-10-01: :class:`AgentPoolsOperations` - * 2019-11-01: :class:`AgentPoolsOperations` - * 2020-01-01: :class:`AgentPoolsOperations` - * 2020-02-01: :class:`AgentPoolsOperations` - * 2020-03-01: :class:`AgentPoolsOperations` - * 2020-04-01: :class:`AgentPoolsOperations` - * 2020-06-01: :class:`AgentPoolsOperations` - * 2020-07-01: :class:`AgentPoolsOperations` - * 2020-09-01: :class:`AgentPoolsOperations` - * 2020-11-01: :class:`AgentPoolsOperations` - * 2020-12-01: :class:`AgentPoolsOperations` - * 2021-02-01: :class:`AgentPoolsOperations` - * 2021-03-01: :class:`AgentPoolsOperations` - * 2021-05-01: :class:`AgentPoolsOperations` - * 2021-07-01: :class:`AgentPoolsOperations` - * 2021-08-01: :class:`AgentPoolsOperations` - * 2021-09-01: :class:`AgentPoolsOperations` - * 2021-10-01: :class:`AgentPoolsOperations` - * 2021-11-01-preview: :class:`AgentPoolsOperations` - * 2022-01-01: :class:`AgentPoolsOperations` - * 2022-01-02-preview: :class:`AgentPoolsOperations` - * 2022-02-01: :class:`AgentPoolsOperations` - * 2022-02-02-preview: :class:`AgentPoolsOperations` - * 2022-03-01: :class:`AgentPoolsOperations` - * 2022-03-02-preview: :class:`AgentPoolsOperations` - * 2022-04-01: :class:`AgentPoolsOperations` - * 2022-04-02-preview: :class:`AgentPoolsOperations` - * 2022-05-02-preview: :class:`AgentPoolsOperations` - * 2022-06-01: :class:`AgentPoolsOperations` - * 2022-06-02-preview: :class:`AgentPoolsOperations` - * 2022-07-01: 
:class:`AgentPoolsOperations` - * 2022-07-02-preview: :class:`AgentPoolsOperations` - * 2022-08-02-preview: :class:`AgentPoolsOperations` - * 2022-08-03-preview: :class:`AgentPoolsOperations` - * 2022-09-01: :class:`AgentPoolsOperations` - * 2022-09-02-preview: :class:`AgentPoolsOperations` - * 2022-10-02-preview: :class:`AgentPoolsOperations` - * 2022-11-01: :class:`AgentPoolsOperations` - * 2022-11-02-preview: :class:`AgentPoolsOperations` - * 2023-01-01: :class:`AgentPoolsOperations` - * 2023-01-02-preview: :class:`AgentPoolsOperations` - * 2023-02-01: :class:`AgentPoolsOperations` - * 2023-02-02-preview: :class:`AgentPoolsOperations` - * 2023-03-01: :class:`AgentPoolsOperations` - * 2023-03-02-preview: :class:`AgentPoolsOperations` - * 2023-04-01: :class:`AgentPoolsOperations` - * 2023-04-02-preview: :class:`AgentPoolsOperations` - * 2023-05-01: :class:`AgentPoolsOperations` - * 2023-05-02-preview: :class:`AgentPoolsOperations` - * 2023-06-01: :class:`AgentPoolsOperations` - * 2023-06-02-preview: :class:`AgentPoolsOperations` - * 2023-07-01: :class:`AgentPoolsOperations` - * 2023-07-02-preview: :class:`AgentPoolsOperations` - * 2023-08-01: :class:`AgentPoolsOperations` - * 2023-08-02-preview: :class:`AgentPoolsOperations` - * 2023-09-01: :class:`AgentPoolsOperations` - * 2023-09-02-preview: :class:`AgentPoolsOperations` - * 2023-10-01: :class:`AgentPoolsOperations` - * 2023-10-02-preview: :class:`AgentPoolsOperations` - * 2023-11-01: :class:`AgentPoolsOperations` - * 2023-11-02-preview: :class:`AgentPoolsOperations` - * 2024-01-01: :class:`AgentPoolsOperations` - * 2024-01-02-preview: :class:`AgentPoolsOperations` - * 2024-02-01: :class:`AgentPoolsOperations` - * 2024-02-02-preview: :class:`AgentPoolsOperations` - * 2024-03-02-preview: :class:`AgentPoolsOperations` - * 2024-04-02-preview: :class:`AgentPoolsOperations` - * 2024-05-01: :class:`AgentPoolsOperations` - * 2024-05-02-preview: :class:`AgentPoolsOperations` - * 2024-06-02-preview: 
:class:`AgentPoolsOperations` - * 2024-07-01: :class:`AgentPoolsOperations` - """ - api_version = self._get_api_version('agent_pools') - if api_version == '2019-02-01': - from ..v2019_02_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2019-04-01': - from ..v2019_04_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2019-06-01': - from ..v2019_06_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2019-08-01': - from ..v2019_08_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2019-10-01': - from ..v2019_10_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2019-11-01': - from ..v2019_11_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-01-01': - from ..v2020_01_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-02-01': - from ..v2020_02_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-03-01': - from ..v2020_03_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-04-01': - from ..v2020_04_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-06-01': - from ..v2020_06_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-07-01': - from ..v2020_07_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-09-01': - from ..v2020_09_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-11-01': - from ..v2020_11_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2020-12-01': - from ..v2020_12_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-02-01': - from ..v2021_02_01.aio.operations import 
AgentPoolsOperations as OperationClass - elif api_version == '2021-03-01': - from ..v2021_03_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-05-01': - from ..v2021_05_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-07-01': - from ..v2021_07_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-08-01': - from ..v2021_08_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-09-01': - from ..v2021_09_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-10-01': - from ..v2021_10_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2021-11-01-preview': - from ..v2021_11_01_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-01-01': - from ..v2022_01_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-01-02-preview': - from ..v2022_01_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-02-01': - from ..v2022_02_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-02-02-preview': - from ..v2022_02_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-03-01': - from ..v2022_03_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from ..v2022_03_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-04-01': - from ..v2022_04_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import 
AgentPoolsOperations as OperationClass - elif api_version == '2022-06-01': - from ..v2022_06_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-07-01': - from ..v2022_07_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-09-01': - from ..v2022_09_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-11-01': - from ..v2022_11_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-01-01': - from ..v2023_01_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-02-01': - from ..v2023_02_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-03-01': - from 
..v2023_03_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-04-01': - from ..v2023_04_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-05-01': - from ..v2023_05_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-06-01': - from ..v2023_06_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-07-01': - from ..v2023_07_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-08-01': - from ..v2023_08_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == 
'2023-11-01': - from ..v2023_11_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-01-01': - from ..v2024_01_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-05-01': - from ..v2024_05_01.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import AgentPoolsOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import AgentPoolsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'agent_pools'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def container_services(self): - """Instance depends on the API version: - - * 2017-07-01: :class:`ContainerServicesOperations` - * 2019-04-01: 
:class:`ContainerServicesOperations` - """ - api_version = self._get_api_version('container_services') - if api_version == '2017-07-01': - from ..v2017_07_01.aio.operations import ContainerServicesOperations as OperationClass - elif api_version == '2019-04-01': - from ..v2019_04_01.aio.operations import ContainerServicesOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'container_services'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def fleet_members(self): - """Instance depends on the API version: - - * 2022-06-02-preview: :class:`FleetMembersOperations` - * 2022-07-02-preview: :class:`FleetMembersOperations` - * 2022-09-02-preview: :class:`FleetMembersOperations` - """ - api_version = self._get_api_version('fleet_members') - if api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import FleetMembersOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import FleetMembersOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import FleetMembersOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'fleet_members'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def fleets(self): - """Instance depends on the API version: - - * 2022-06-02-preview: :class:`FleetsOperations` - * 2022-07-02-preview: :class:`FleetsOperations` - * 2022-09-02-preview: :class:`FleetsOperations` - """ - api_version = self._get_api_version('fleets') - if api_version == 
'2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import FleetsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import FleetsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import FleetsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'fleets'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def load_balancers(self): - """Instance depends on the API version: - - * 2024-03-02-preview: :class:`LoadBalancersOperations` - * 2024-04-02-preview: :class:`LoadBalancersOperations` - * 2024-05-02-preview: :class:`LoadBalancersOperations` - * 2024-06-02-preview: :class:`LoadBalancersOperations` - """ - api_version = self._get_api_version('load_balancers') - if api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import LoadBalancersOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import LoadBalancersOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import LoadBalancersOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import LoadBalancersOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'load_balancers'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def machines(self): - """Instance depends on the API version: - - * 
2023-07-02-preview: :class:`MachinesOperations` - * 2023-08-02-preview: :class:`MachinesOperations` - * 2023-09-02-preview: :class:`MachinesOperations` - * 2023-10-02-preview: :class:`MachinesOperations` - * 2023-11-02-preview: :class:`MachinesOperations` - * 2024-01-02-preview: :class:`MachinesOperations` - * 2024-02-02-preview: :class:`MachinesOperations` - * 2024-03-02-preview: :class:`MachinesOperations` - * 2024-04-02-preview: :class:`MachinesOperations` - * 2024-05-02-preview: :class:`MachinesOperations` - * 2024-06-02-preview: :class:`MachinesOperations` - * 2024-07-01: :class:`MachinesOperations` - """ - api_version = self._get_api_version('machines') - if api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import MachinesOperations as OperationClass - elif 
api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import MachinesOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import MachinesOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'machines'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def maintenance_configurations(self): - """Instance depends on the API version: - - * 2020-12-01: :class:`MaintenanceConfigurationsOperations` - * 2021-02-01: :class:`MaintenanceConfigurationsOperations` - * 2021-03-01: :class:`MaintenanceConfigurationsOperations` - * 2021-05-01: :class:`MaintenanceConfigurationsOperations` - * 2021-07-01: :class:`MaintenanceConfigurationsOperations` - * 2021-08-01: :class:`MaintenanceConfigurationsOperations` - * 2021-09-01: :class:`MaintenanceConfigurationsOperations` - * 2021-10-01: :class:`MaintenanceConfigurationsOperations` - * 2021-11-01-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-01-01: :class:`MaintenanceConfigurationsOperations` - * 2022-01-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-02-01: :class:`MaintenanceConfigurationsOperations` - * 2022-02-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-03-01: :class:`MaintenanceConfigurationsOperations` - * 2022-03-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-04-01: :class:`MaintenanceConfigurationsOperations` - * 2022-04-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-05-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-06-01: :class:`MaintenanceConfigurationsOperations` - * 2022-06-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-07-01: :class:`MaintenanceConfigurationsOperations` - * 
2022-07-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-08-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-08-03-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-09-01: :class:`MaintenanceConfigurationsOperations` - * 2022-09-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-10-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2022-11-01: :class:`MaintenanceConfigurationsOperations` - * 2022-11-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-01-01: :class:`MaintenanceConfigurationsOperations` - * 2023-01-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-02-01: :class:`MaintenanceConfigurationsOperations` - * 2023-02-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-03-01: :class:`MaintenanceConfigurationsOperations` - * 2023-03-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-04-01: :class:`MaintenanceConfigurationsOperations` - * 2023-04-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-05-01: :class:`MaintenanceConfigurationsOperations` - * 2023-05-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-06-01: :class:`MaintenanceConfigurationsOperations` - * 2023-06-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-07-01: :class:`MaintenanceConfigurationsOperations` - * 2023-07-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-08-01: :class:`MaintenanceConfigurationsOperations` - * 2023-08-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-09-01: :class:`MaintenanceConfigurationsOperations` - * 2023-09-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-10-01: :class:`MaintenanceConfigurationsOperations` - * 2023-10-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2023-11-01: :class:`MaintenanceConfigurationsOperations` - * 2023-11-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-01-01: 
:class:`MaintenanceConfigurationsOperations` - * 2024-01-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-02-01: :class:`MaintenanceConfigurationsOperations` - * 2024-02-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-03-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-04-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-05-01: :class:`MaintenanceConfigurationsOperations` - * 2024-05-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-06-02-preview: :class:`MaintenanceConfigurationsOperations` - * 2024-07-01: :class:`MaintenanceConfigurationsOperations` - """ - api_version = self._get_api_version('maintenance_configurations') - if api_version == '2020-12-01': - from ..v2020_12_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-02-01': - from ..v2021_02_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-03-01': - from ..v2021_03_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-05-01': - from ..v2021_05_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-07-01': - from ..v2021_07_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-08-01': - from ..v2021_08_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-09-01': - from ..v2021_09_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-10-01': - from ..v2021_10_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2021-11-01-preview': - from ..v2021_11_01_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-01-01': - from ..v2022_01_01.aio.operations 
import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-01-02-preview': - from ..v2022_01_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-02-01': - from ..v2022_02_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-02-02-preview': - from ..v2022_02_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-03-01': - from ..v2022_03_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from ..v2022_03_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-04-01': - from ..v2022_04_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-06-01': - from ..v2022_06_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-07-01': - from ..v2022_07_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from 
..v2022_08_03_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-09-01': - from ..v2022_09_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-11-01': - from ..v2022_11_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-01-01': - from ..v2023_01_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-02-01': - from ..v2023_02_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-03-01': - from ..v2023_03_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-04-01': - from ..v2023_04_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-05-01': - from 
..v2023_05_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-06-01': - from ..v2023_06_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-07-01': - from ..v2023_07_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-08-01': - from ..v2023_08_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-11-01': - from ..v2023_11_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-01-01': - from 
..v2024_01_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-05-01': - from ..v2024_05_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import MaintenanceConfigurationsOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import MaintenanceConfigurationsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'maintenance_configurations'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def managed_cluster_snapshots(self): - """Instance depends on the API version: - - * 2022-02-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-03-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-04-02-preview: 
:class:`ManagedClusterSnapshotsOperations` - * 2022-05-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-06-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-07-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-08-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-08-03-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-09-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-10-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2022-11-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-01-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-02-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-03-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-04-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-05-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-06-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-07-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-08-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-09-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-10-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2023-11-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-01-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-02-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-03-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-04-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-05-02-preview: :class:`ManagedClusterSnapshotsOperations` - * 2024-06-02-preview: :class:`ManagedClusterSnapshotsOperations` - """ - api_version = self._get_api_version('managed_cluster_snapshots') - if api_version == '2022-02-02-preview': - from ..v2022_02_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from 
..v2022_03_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import ManagedClusterSnapshotsOperations as 
OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import ManagedClusterSnapshotsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 
'managed_cluster_snapshots'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def managed_clusters(self): - """Instance depends on the API version: - - * 2018-03-31: :class:`ManagedClustersOperations` - * 2018-08-01-preview: :class:`ManagedClustersOperations` - * 2019-02-01: :class:`ManagedClustersOperations` - * 2019-04-01: :class:`ManagedClustersOperations` - * 2019-06-01: :class:`ManagedClustersOperations` - * 2019-08-01: :class:`ManagedClustersOperations` - * 2019-10-01: :class:`ManagedClustersOperations` - * 2019-11-01: :class:`ManagedClustersOperations` - * 2020-01-01: :class:`ManagedClustersOperations` - * 2020-02-01: :class:`ManagedClustersOperations` - * 2020-03-01: :class:`ManagedClustersOperations` - * 2020-04-01: :class:`ManagedClustersOperations` - * 2020-06-01: :class:`ManagedClustersOperations` - * 2020-07-01: :class:`ManagedClustersOperations` - * 2020-09-01: :class:`ManagedClustersOperations` - * 2020-11-01: :class:`ManagedClustersOperations` - * 2020-12-01: :class:`ManagedClustersOperations` - * 2021-02-01: :class:`ManagedClustersOperations` - * 2021-03-01: :class:`ManagedClustersOperations` - * 2021-05-01: :class:`ManagedClustersOperations` - * 2021-07-01: :class:`ManagedClustersOperations` - * 2021-08-01: :class:`ManagedClustersOperations` - * 2021-09-01: :class:`ManagedClustersOperations` - * 2021-10-01: :class:`ManagedClustersOperations` - * 2021-11-01-preview: :class:`ManagedClustersOperations` - * 2022-01-01: :class:`ManagedClustersOperations` - * 2022-01-02-preview: :class:`ManagedClustersOperations` - * 2022-02-01: :class:`ManagedClustersOperations` - * 2022-02-02-preview: :class:`ManagedClustersOperations` - * 2022-03-01: :class:`ManagedClustersOperations` - * 2022-03-02-preview: :class:`ManagedClustersOperations` - * 2022-04-01: 
:class:`ManagedClustersOperations` - * 2022-04-02-preview: :class:`ManagedClustersOperations` - * 2022-05-02-preview: :class:`ManagedClustersOperations` - * 2022-06-01: :class:`ManagedClustersOperations` - * 2022-06-02-preview: :class:`ManagedClustersOperations` - * 2022-07-01: :class:`ManagedClustersOperations` - * 2022-07-02-preview: :class:`ManagedClustersOperations` - * 2022-08-02-preview: :class:`ManagedClustersOperations` - * 2022-08-03-preview: :class:`ManagedClustersOperations` - * 2022-09-01: :class:`ManagedClustersOperations` - * 2022-09-02-preview: :class:`ManagedClustersOperations` - * 2022-10-02-preview: :class:`ManagedClustersOperations` - * 2022-11-01: :class:`ManagedClustersOperations` - * 2022-11-02-preview: :class:`ManagedClustersOperations` - * 2023-01-01: :class:`ManagedClustersOperations` - * 2023-01-02-preview: :class:`ManagedClustersOperations` - * 2023-02-01: :class:`ManagedClustersOperations` - * 2023-02-02-preview: :class:`ManagedClustersOperations` - * 2023-03-01: :class:`ManagedClustersOperations` - * 2023-03-02-preview: :class:`ManagedClustersOperations` - * 2023-04-01: :class:`ManagedClustersOperations` - * 2023-04-02-preview: :class:`ManagedClustersOperations` - * 2023-05-01: :class:`ManagedClustersOperations` - * 2023-05-02-preview: :class:`ManagedClustersOperations` - * 2023-06-01: :class:`ManagedClustersOperations` - * 2023-06-02-preview: :class:`ManagedClustersOperations` - * 2023-07-01: :class:`ManagedClustersOperations` - * 2023-07-02-preview: :class:`ManagedClustersOperations` - * 2023-08-01: :class:`ManagedClustersOperations` - * 2023-08-02-preview: :class:`ManagedClustersOperations` - * 2023-09-01: :class:`ManagedClustersOperations` - * 2023-09-02-preview: :class:`ManagedClustersOperations` - * 2023-10-01: :class:`ManagedClustersOperations` - * 2023-10-02-preview: :class:`ManagedClustersOperations` - * 2023-11-01: :class:`ManagedClustersOperations` - * 2023-11-02-preview: :class:`ManagedClustersOperations` - * 2024-01-01: 
:class:`ManagedClustersOperations` - * 2024-01-02-preview: :class:`ManagedClustersOperations` - * 2024-02-01: :class:`ManagedClustersOperations` - * 2024-02-02-preview: :class:`ManagedClustersOperations` - * 2024-03-02-preview: :class:`ManagedClustersOperations` - * 2024-04-02-preview: :class:`ManagedClustersOperations` - * 2024-05-01: :class:`ManagedClustersOperations` - * 2024-05-02-preview: :class:`ManagedClustersOperations` - * 2024-06-02-preview: :class:`ManagedClustersOperations` - * 2024-07-01: :class:`ManagedClustersOperations` - """ - api_version = self._get_api_version('managed_clusters') - if api_version == '2018-03-31': - from ..v2018_03_31.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2018-08-01-preview': - from ..v2018_08_01_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-02-01': - from ..v2019_02_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-04-01': - from ..v2019_04_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-06-01': - from ..v2019_06_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-08-01': - from ..v2019_08_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-10-01': - from ..v2019_10_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2019-11-01': - from ..v2019_11_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-01-01': - from ..v2020_01_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-02-01': - from ..v2020_02_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-03-01': - from ..v2020_03_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == 
'2020-04-01': - from ..v2020_04_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-06-01': - from ..v2020_06_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-07-01': - from ..v2020_07_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-09-01': - from ..v2020_09_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-11-01': - from ..v2020_11_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2020-12-01': - from ..v2020_12_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-02-01': - from ..v2021_02_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-03-01': - from ..v2021_03_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-05-01': - from ..v2021_05_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-07-01': - from ..v2021_07_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-08-01': - from ..v2021_08_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-09-01': - from ..v2021_09_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-10-01': - from ..v2021_10_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2021-11-01-preview': - from ..v2021_11_01_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-01-01': - from ..v2022_01_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-01-02-preview': - from ..v2022_01_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == 
'2022-02-01': - from ..v2022_02_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-02-02-preview': - from ..v2022_02_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-03-01': - from ..v2022_03_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-03-02-preview': - from ..v2022_03_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-04-01': - from ..v2022_04_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-06-01': - from ..v2022_06_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-07-01': - from ..v2022_07_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-09-01': - from ..v2022_09_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == 
'2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-11-01': - from ..v2022_11_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-01-01': - from ..v2023_01_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-02-01': - from ..v2023_02_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-03-01': - from ..v2023_03_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-04-01': - from ..v2023_04_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-05-01': - from ..v2023_05_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-06-01': - from ..v2023_06_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-07-01': - from 
..v2023_07_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-08-01': - from ..v2023_08_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-11-01': - from ..v2023_11_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-01-01': - from ..v2024_01_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-04-02-preview': - from 
..v2024_04_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-05-01': - from ..v2024_05_01.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import ManagedClustersOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import ManagedClustersOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'managed_clusters'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def open_shift_managed_clusters(self): - """Instance depends on the API version: - - * 2018-09-30-preview: :class:`OpenShiftManagedClustersOperations` - * 2019-04-30: :class:`OpenShiftManagedClustersOperations` - * 2019-09-30-preview: :class:`OpenShiftManagedClustersOperations` - * 2019-10-27-preview: :class:`OpenShiftManagedClustersOperations` - """ - api_version = self._get_api_version('open_shift_managed_clusters') - if api_version == '2018-09-30-preview': - from ..v2018_09_30_preview.aio.operations import OpenShiftManagedClustersOperations as OperationClass - elif api_version == '2019-04-30': - from ..v2019_04_30.aio.operations import OpenShiftManagedClustersOperations as OperationClass - elif api_version == '2019-09-30-preview': - from ..v2019_09_30_preview.aio.operations import OpenShiftManagedClustersOperations as OperationClass - elif api_version == '2019-10-27-preview': - from ..v2019_10_27_preview.aio.operations import OpenShiftManagedClustersOperations as OperationClass - else: - raise ValueError("API version {} does not have 
operation group 'open_shift_managed_clusters'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def operation_status_result(self): - """Instance depends on the API version: - - * 2023-10-02-preview: :class:`OperationStatusResultOperations` - * 2023-11-02-preview: :class:`OperationStatusResultOperations` - * 2024-01-02-preview: :class:`OperationStatusResultOperations` - * 2024-02-02-preview: :class:`OperationStatusResultOperations` - * 2024-03-02-preview: :class:`OperationStatusResultOperations` - * 2024-04-02-preview: :class:`OperationStatusResultOperations` - * 2024-05-02-preview: :class:`OperationStatusResultOperations` - * 2024-06-02-preview: :class:`OperationStatusResultOperations` - """ - api_version = self._get_api_version('operation_status_result') - if api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import OperationStatusResultOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import OperationStatusResultOperations as OperationClass - elif api_version == 
'2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import OperationStatusResultOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'operation_status_result'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def operations(self): - """Instance depends on the API version: - - * 2018-03-31: :class:`Operations` - * 2018-08-01-preview: :class:`Operations` - * 2019-02-01: :class:`Operations` - * 2019-04-01: :class:`Operations` - * 2019-06-01: :class:`Operations` - * 2019-08-01: :class:`Operations` - * 2019-10-01: :class:`Operations` - * 2019-11-01: :class:`Operations` - * 2020-01-01: :class:`Operations` - * 2020-02-01: :class:`Operations` - * 2020-03-01: :class:`Operations` - * 2020-04-01: :class:`Operations` - * 2020-06-01: :class:`Operations` - * 2020-07-01: :class:`Operations` - * 2020-09-01: :class:`Operations` - * 2020-11-01: :class:`Operations` - * 2020-12-01: :class:`Operations` - * 2021-02-01: :class:`Operations` - * 2021-03-01: :class:`Operations` - * 2021-05-01: :class:`Operations` - * 2021-07-01: :class:`Operations` - * 2021-08-01: :class:`Operations` - * 2021-09-01: :class:`Operations` - * 2021-10-01: :class:`Operations` - * 2021-11-01-preview: :class:`Operations` - * 2022-01-01: :class:`Operations` - * 2022-01-02-preview: :class:`Operations` - * 2022-02-01: :class:`Operations` - * 2022-02-02-preview: :class:`Operations` - * 2022-03-01: :class:`Operations` - * 2022-03-02-preview: :class:`Operations` - * 2022-04-01: :class:`Operations` - * 2022-04-02-preview: :class:`Operations` - * 2022-05-02-preview: :class:`Operations` - * 2022-06-01: :class:`Operations` - * 2022-06-02-preview: :class:`Operations` - * 2022-07-01: :class:`Operations` - * 2022-07-02-preview: :class:`Operations` - * 2022-08-02-preview: 
:class:`Operations` - * 2022-08-03-preview: :class:`Operations` - * 2022-09-01: :class:`Operations` - * 2022-09-02-preview: :class:`Operations` - * 2022-10-02-preview: :class:`Operations` - * 2022-11-01: :class:`Operations` - * 2022-11-02-preview: :class:`Operations` - * 2023-01-01: :class:`Operations` - * 2023-01-02-preview: :class:`Operations` - * 2023-02-01: :class:`Operations` - * 2023-02-02-preview: :class:`Operations` - * 2023-03-01: :class:`Operations` - * 2023-03-02-preview: :class:`Operations` - * 2023-04-01: :class:`Operations` - * 2023-04-02-preview: :class:`Operations` - * 2023-05-01: :class:`Operations` - * 2023-05-02-preview: :class:`Operations` - * 2023-06-01: :class:`Operations` - * 2023-06-02-preview: :class:`Operations` - * 2023-07-01: :class:`Operations` - * 2023-07-02-preview: :class:`Operations` - * 2023-08-01: :class:`Operations` - * 2023-08-02-preview: :class:`Operations` - * 2023-09-01: :class:`Operations` - * 2023-09-02-preview: :class:`Operations` - * 2023-10-01: :class:`Operations` - * 2023-10-02-preview: :class:`Operations` - * 2023-11-01: :class:`Operations` - * 2023-11-02-preview: :class:`Operations` - * 2024-01-01: :class:`Operations` - * 2024-01-02-preview: :class:`Operations` - * 2024-02-01: :class:`Operations` - * 2024-02-02-preview: :class:`Operations` - * 2024-03-02-preview: :class:`Operations` - * 2024-04-02-preview: :class:`Operations` - * 2024-05-01: :class:`Operations` - * 2024-05-02-preview: :class:`Operations` - * 2024-06-02-preview: :class:`Operations` - * 2024-07-01: :class:`Operations` - """ - api_version = self._get_api_version('operations') - if api_version == '2018-03-31': - from ..v2018_03_31.aio.operations import Operations as OperationClass - elif api_version == '2018-08-01-preview': - from ..v2018_08_01_preview.aio.operations import Operations as OperationClass - elif api_version == '2019-02-01': - from ..v2019_02_01.aio.operations import Operations as OperationClass - elif api_version == '2019-04-01': - from 
..v2019_04_01.aio.operations import Operations as OperationClass - elif api_version == '2019-06-01': - from ..v2019_06_01.aio.operations import Operations as OperationClass - elif api_version == '2019-08-01': - from ..v2019_08_01.aio.operations import Operations as OperationClass - elif api_version == '2019-10-01': - from ..v2019_10_01.aio.operations import Operations as OperationClass - elif api_version == '2019-11-01': - from ..v2019_11_01.aio.operations import Operations as OperationClass - elif api_version == '2020-01-01': - from ..v2020_01_01.aio.operations import Operations as OperationClass - elif api_version == '2020-02-01': - from ..v2020_02_01.aio.operations import Operations as OperationClass - elif api_version == '2020-03-01': - from ..v2020_03_01.aio.operations import Operations as OperationClass - elif api_version == '2020-04-01': - from ..v2020_04_01.aio.operations import Operations as OperationClass - elif api_version == '2020-06-01': - from ..v2020_06_01.aio.operations import Operations as OperationClass - elif api_version == '2020-07-01': - from ..v2020_07_01.aio.operations import Operations as OperationClass - elif api_version == '2020-09-01': - from ..v2020_09_01.aio.operations import Operations as OperationClass - elif api_version == '2020-11-01': - from ..v2020_11_01.aio.operations import Operations as OperationClass - elif api_version == '2020-12-01': - from ..v2020_12_01.aio.operations import Operations as OperationClass - elif api_version == '2021-02-01': - from ..v2021_02_01.aio.operations import Operations as OperationClass - elif api_version == '2021-03-01': - from ..v2021_03_01.aio.operations import Operations as OperationClass - elif api_version == '2021-05-01': - from ..v2021_05_01.aio.operations import Operations as OperationClass - elif api_version == '2021-07-01': - from ..v2021_07_01.aio.operations import Operations as OperationClass - elif api_version == '2021-08-01': - from ..v2021_08_01.aio.operations import Operations as 
OperationClass - elif api_version == '2021-09-01': - from ..v2021_09_01.aio.operations import Operations as OperationClass - elif api_version == '2021-10-01': - from ..v2021_10_01.aio.operations import Operations as OperationClass - elif api_version == '2021-11-01-preview': - from ..v2021_11_01_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-01-01': - from ..v2022_01_01.aio.operations import Operations as OperationClass - elif api_version == '2022-01-02-preview': - from ..v2022_01_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-02-01': - from ..v2022_02_01.aio.operations import Operations as OperationClass - elif api_version == '2022-02-02-preview': - from ..v2022_02_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-03-01': - from ..v2022_03_01.aio.operations import Operations as OperationClass - elif api_version == '2022-03-02-preview': - from ..v2022_03_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-04-01': - from ..v2022_04_01.aio.operations import Operations as OperationClass - elif api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-06-01': - from ..v2022_06_01.aio.operations import Operations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-07-01': - from ..v2022_07_01.aio.operations import Operations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import Operations as OperationClass - 
elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-09-01': - from ..v2022_09_01.aio.operations import Operations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2022-11-01': - from ..v2022_11_01.aio.operations import Operations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-01-01': - from ..v2023_01_01.aio.operations import Operations as OperationClass - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-02-01': - from ..v2023_02_01.aio.operations import Operations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-03-01': - from ..v2023_03_01.aio.operations import Operations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-04-01': - from ..v2023_04_01.aio.operations import Operations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-05-01': - from ..v2023_05_01.aio.operations import Operations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-06-01': - from ..v2023_06_01.aio.operations import Operations as OperationClass - elif api_version == 
'2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-07-01': - from ..v2023_07_01.aio.operations import Operations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-08-01': - from ..v2023_08_01.aio.operations import Operations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import Operations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import Operations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2023-11-01': - from ..v2023_11_01.aio.operations import Operations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2024-01-01': - from ..v2024_01_01.aio.operations import Operations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import Operations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import Operations as OperationClass - elif api_version == 
'2024-05-01': - from ..v2024_05_01.aio.operations import Operations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import Operations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import Operations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'operations'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def private_endpoint_connections(self): - """Instance depends on the API version: - - * 2020-06-01: :class:`PrivateEndpointConnectionsOperations` - * 2020-07-01: :class:`PrivateEndpointConnectionsOperations` - * 2020-09-01: :class:`PrivateEndpointConnectionsOperations` - * 2020-11-01: :class:`PrivateEndpointConnectionsOperations` - * 2020-12-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-02-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-03-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-05-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-07-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-08-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-09-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-10-01: :class:`PrivateEndpointConnectionsOperations` - * 2021-11-01-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-01-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-01-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-02-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-02-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-03-01: :class:`PrivateEndpointConnectionsOperations` - * 
2022-03-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-04-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-04-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-05-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-06-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-06-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-07-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-07-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-08-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-08-03-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-09-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-09-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-10-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2022-11-01: :class:`PrivateEndpointConnectionsOperations` - * 2022-11-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-01-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-01-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-02-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-02-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-03-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-03-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-04-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-04-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-05-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-05-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-06-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-06-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-07-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-07-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-08-01: :class:`PrivateEndpointConnectionsOperations` - * 
2023-08-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-09-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-09-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-10-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-10-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2023-11-01: :class:`PrivateEndpointConnectionsOperations` - * 2023-11-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-01-01: :class:`PrivateEndpointConnectionsOperations` - * 2024-01-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-02-01: :class:`PrivateEndpointConnectionsOperations` - * 2024-02-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-03-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-04-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-05-01: :class:`PrivateEndpointConnectionsOperations` - * 2024-05-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-06-02-preview: :class:`PrivateEndpointConnectionsOperations` - * 2024-07-01: :class:`PrivateEndpointConnectionsOperations` - """ - api_version = self._get_api_version('private_endpoint_connections') - if api_version == '2020-06-01': - from ..v2020_06_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2020-07-01': - from ..v2020_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2020-09-01': - from ..v2020_09_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2020-11-01': - from ..v2020_11_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2020-12-01': - from ..v2020_12_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-02-01': - from ..v2021_02_01.aio.operations import PrivateEndpointConnectionsOperations as 
OperationClass - elif api_version == '2021-03-01': - from ..v2021_03_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-05-01': - from ..v2021_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-07-01': - from ..v2021_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-08-01': - from ..v2021_08_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-09-01': - from ..v2021_09_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-10-01': - from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2021-11-01-preview': - from ..v2021_11_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-01-01': - from ..v2022_01_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-01-02-preview': - from ..v2022_01_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-02-01': - from ..v2022_02_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-02-02-preview': - from ..v2022_02_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-03-01': - from ..v2022_03_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from ..v2022_03_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-04-01': - from ..v2022_04_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-04-02-preview': 
- from ..v2022_04_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-06-01': - from ..v2022_06_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-07-01': - from ..v2022_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-09-01': - from ..v2022_09_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-11-01': - from ..v2022_11_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-01-01': - from ..v2023_01_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == 
'2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-02-01': - from ..v2023_02_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-03-01': - from ..v2023_03_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-04-01': - from ..v2023_04_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-05-01': - from ..v2023_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-06-01': - from ..v2023_06_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-07-01': - from ..v2023_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-08-01': - from ..v2023_08_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == 
'2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-11-01': - from ..v2023_11_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-01-01': - from ..v2024_01_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-05-01': - from ..v2024_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif 
api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def private_link_resources(self): - """Instance depends on the API version: - - * 2020-09-01: :class:`PrivateLinkResourcesOperations` - * 2020-11-01: :class:`PrivateLinkResourcesOperations` - * 2020-12-01: :class:`PrivateLinkResourcesOperations` - * 2021-02-01: :class:`PrivateLinkResourcesOperations` - * 2021-03-01: :class:`PrivateLinkResourcesOperations` - * 2021-05-01: :class:`PrivateLinkResourcesOperations` - * 2021-07-01: :class:`PrivateLinkResourcesOperations` - * 2021-08-01: :class:`PrivateLinkResourcesOperations` - * 2021-09-01: :class:`PrivateLinkResourcesOperations` - * 2021-10-01: :class:`PrivateLinkResourcesOperations` - * 2021-11-01-preview: :class:`PrivateLinkResourcesOperations` - * 2022-01-01: :class:`PrivateLinkResourcesOperations` - * 2022-01-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-02-01: :class:`PrivateLinkResourcesOperations` - * 2022-02-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-03-01: :class:`PrivateLinkResourcesOperations` - * 2022-03-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-04-01: :class:`PrivateLinkResourcesOperations` - * 2022-04-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-05-02-preview: 
:class:`PrivateLinkResourcesOperations` - * 2022-06-01: :class:`PrivateLinkResourcesOperations` - * 2022-06-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-07-01: :class:`PrivateLinkResourcesOperations` - * 2022-07-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-08-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-08-03-preview: :class:`PrivateLinkResourcesOperations` - * 2022-09-01: :class:`PrivateLinkResourcesOperations` - * 2022-09-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-10-02-preview: :class:`PrivateLinkResourcesOperations` - * 2022-11-01: :class:`PrivateLinkResourcesOperations` - * 2022-11-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-01-01: :class:`PrivateLinkResourcesOperations` - * 2023-01-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-02-01: :class:`PrivateLinkResourcesOperations` - * 2023-02-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-03-01: :class:`PrivateLinkResourcesOperations` - * 2023-03-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-04-01: :class:`PrivateLinkResourcesOperations` - * 2023-04-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-05-01: :class:`PrivateLinkResourcesOperations` - * 2023-05-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-06-01: :class:`PrivateLinkResourcesOperations` - * 2023-06-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-07-01: :class:`PrivateLinkResourcesOperations` - * 2023-07-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-08-01: :class:`PrivateLinkResourcesOperations` - * 2023-08-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-09-01: :class:`PrivateLinkResourcesOperations` - * 2023-09-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-10-01: :class:`PrivateLinkResourcesOperations` - * 2023-10-02-preview: :class:`PrivateLinkResourcesOperations` - * 2023-11-01: :class:`PrivateLinkResourcesOperations` - * 2023-11-02-preview: 
:class:`PrivateLinkResourcesOperations` - * 2024-01-01: :class:`PrivateLinkResourcesOperations` - * 2024-01-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-02-01: :class:`PrivateLinkResourcesOperations` - * 2024-02-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-03-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-04-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-05-01: :class:`PrivateLinkResourcesOperations` - * 2024-05-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-06-02-preview: :class:`PrivateLinkResourcesOperations` - * 2024-07-01: :class:`PrivateLinkResourcesOperations` - """ - api_version = self._get_api_version('private_link_resources') - if api_version == '2020-09-01': - from ..v2020_09_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2020-11-01': - from ..v2020_11_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2020-12-01': - from ..v2020_12_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-02-01': - from ..v2021_02_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-03-01': - from ..v2021_03_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-05-01': - from ..v2021_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-07-01': - from ..v2021_07_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-08-01': - from ..v2021_08_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-09-01': - from ..v2021_09_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2021-10-01': - from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - 
elif api_version == '2021-11-01-preview': - from ..v2021_11_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-01-01': - from ..v2022_01_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-01-02-preview': - from ..v2022_01_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-02-01': - from ..v2022_02_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-02-02-preview': - from ..v2022_02_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-03-01': - from ..v2022_03_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-03-02-preview': - from ..v2022_03_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-04-01': - from ..v2022_04_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-06-01': - from ..v2022_06_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-07-01': - from ..v2022_07_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-08-02-preview': - from 
..v2022_08_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-09-01': - from ..v2022_09_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-11-01': - from ..v2022_11_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-01-01': - from ..v2023_01_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-02-01': - from ..v2023_02_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-03-01': - from ..v2023_03_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-04-01': - from ..v2023_04_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import PrivateLinkResourcesOperations as 
OperationClass - elif api_version == '2023-05-01': - from ..v2023_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-06-01': - from ..v2023_06_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-07-01': - from ..v2023_07_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-08-01': - from ..v2023_08_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-11-01': - from ..v2023_11_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-01-01': - from ..v2024_01_01.aio.operations import 
PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-05-01': - from ..v2024_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import PrivateLinkResourcesOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - @property - def resolve_private_link_service_id(self): - """Instance depends on the API version: - - * 2020-09-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2020-11-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2020-12-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-02-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-03-01: 
:class:`ResolvePrivateLinkServiceIdOperations` - * 2021-05-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-07-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-08-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-09-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-10-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2021-11-01-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-01-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-02-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-03-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-04-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-06-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-07-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-07-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-08-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-08-03-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-09-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-09-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-10-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-11-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2022-11-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-01-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-02-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 
2023-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-03-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-04-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-05-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-06-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-07-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-07-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-08-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-08-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-09-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-09-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-10-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-10-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-11-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2023-11-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-01-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-01-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-02-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-02-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-03-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-04-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-05-01: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-05-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-06-02-preview: :class:`ResolvePrivateLinkServiceIdOperations` - * 2024-07-01: :class:`ResolvePrivateLinkServiceIdOperations` - """ - api_version = 
self._get_api_version('resolve_private_link_service_id') - if api_version == '2020-09-01': - from ..v2020_09_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2020-11-01': - from ..v2020_11_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2020-12-01': - from ..v2020_12_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-02-01': - from ..v2021_02_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-03-01': - from ..v2021_03_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-05-01': - from ..v2021_05_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-07-01': - from ..v2021_07_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-08-01': - from ..v2021_08_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-09-01': - from ..v2021_09_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-10-01': - from ..v2021_10_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2021-11-01-preview': - from ..v2021_11_01_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-01-01': - from ..v2022_01_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-01-02-preview': - from ..v2022_01_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-02-01': - from ..v2022_02_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == 
'2022-02-02-preview': - from ..v2022_02_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-03-01': - from ..v2022_03_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-03-02-preview': - from ..v2022_03_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-04-01': - from ..v2022_04_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-06-01': - from ..v2022_06_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-07-01': - from ..v2022_07_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-09-01': - from ..v2022_09_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as 
OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-11-01': - from ..v2022_11_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-01-01': - from ..v2023_01_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-02-01': - from ..v2023_02_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-03-01': - from ..v2023_03_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-04-01': - from ..v2023_04_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-05-01': - from ..v2023_05_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-06-01': - from ..v2023_06_01.aio.operations import ResolvePrivateLinkServiceIdOperations 
as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-07-01': - from ..v2023_07_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-08-01': - from ..v2023_08_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-11-01': - from ..v2023_11_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-01-01': - from ..v2024_01_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import 
ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-05-01': - from ..v2024_05_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import ResolvePrivateLinkServiceIdOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'resolve_private_link_service_id'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.operations = Operations(self._client, self._config, self._serialize, self._deserialize) + self.managed_clusters = ManagedClustersOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.maintenance_configurations = MaintenanceConfigurationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + 
self.managed_namespaces = ManagedNamespacesOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.agent_pools = AgentPoolsOperations(self._client, self._config, self._serialize, self._deserialize) + self.private_endpoint_connections = PrivateEndpointConnectionsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.private_link_resources = PrivateLinkResourcesOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.resolve_private_link_service_id = ResolvePrivateLinkServiceIdOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.snapshots = SnapshotsOperations(self._client, self._config, self._serialize, self._deserialize) + self.trusted_access_role_bindings = TrustedAccessRoleBindingsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.trusted_access_roles = TrustedAccessRolesOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.machines = MachinesOperations(self._client, self._config, self._serialize, self._deserialize) - @property - def snapshots(self): - """Instance depends on the API version: + def _send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. 
- * 2021-08-01: :class:`SnapshotsOperations` - * 2021-09-01: :class:`SnapshotsOperations` - * 2021-10-01: :class:`SnapshotsOperations` - * 2021-11-01-preview: :class:`SnapshotsOperations` - * 2022-01-01: :class:`SnapshotsOperations` - * 2022-01-02-preview: :class:`SnapshotsOperations` - * 2022-02-01: :class:`SnapshotsOperations` - * 2022-02-02-preview: :class:`SnapshotsOperations` - * 2022-03-01: :class:`SnapshotsOperations` - * 2022-03-02-preview: :class:`SnapshotsOperations` - * 2022-04-01: :class:`SnapshotsOperations` - * 2022-04-02-preview: :class:`SnapshotsOperations` - * 2022-05-02-preview: :class:`SnapshotsOperations` - * 2022-06-01: :class:`SnapshotsOperations` - * 2022-06-02-preview: :class:`SnapshotsOperations` - * 2022-07-01: :class:`SnapshotsOperations` - * 2022-07-02-preview: :class:`SnapshotsOperations` - * 2022-08-02-preview: :class:`SnapshotsOperations` - * 2022-08-03-preview: :class:`SnapshotsOperations` - * 2022-09-01: :class:`SnapshotsOperations` - * 2022-09-02-preview: :class:`SnapshotsOperations` - * 2022-10-02-preview: :class:`SnapshotsOperations` - * 2022-11-01: :class:`SnapshotsOperations` - * 2022-11-02-preview: :class:`SnapshotsOperations` - * 2023-01-01: :class:`SnapshotsOperations` - * 2023-01-02-preview: :class:`SnapshotsOperations` - * 2023-02-01: :class:`SnapshotsOperations` - * 2023-02-02-preview: :class:`SnapshotsOperations` - * 2023-03-01: :class:`SnapshotsOperations` - * 2023-03-02-preview: :class:`SnapshotsOperations` - * 2023-04-01: :class:`SnapshotsOperations` - * 2023-04-02-preview: :class:`SnapshotsOperations` - * 2023-05-01: :class:`SnapshotsOperations` - * 2023-05-02-preview: :class:`SnapshotsOperations` - * 2023-06-01: :class:`SnapshotsOperations` - * 2023-06-02-preview: :class:`SnapshotsOperations` - * 2023-07-01: :class:`SnapshotsOperations` - * 2023-07-02-preview: :class:`SnapshotsOperations` - * 2023-08-01: :class:`SnapshotsOperations` - * 2023-08-02-preview: :class:`SnapshotsOperations` - * 2023-09-01: 
:class:`SnapshotsOperations` - * 2023-09-02-preview: :class:`SnapshotsOperations` - * 2023-10-01: :class:`SnapshotsOperations` - * 2023-10-02-preview: :class:`SnapshotsOperations` - * 2023-11-01: :class:`SnapshotsOperations` - * 2023-11-02-preview: :class:`SnapshotsOperations` - * 2024-01-01: :class:`SnapshotsOperations` - * 2024-01-02-preview: :class:`SnapshotsOperations` - * 2024-02-01: :class:`SnapshotsOperations` - * 2024-02-02-preview: :class:`SnapshotsOperations` - * 2024-03-02-preview: :class:`SnapshotsOperations` - * 2024-04-02-preview: :class:`SnapshotsOperations` - * 2024-05-01: :class:`SnapshotsOperations` - * 2024-05-02-preview: :class:`SnapshotsOperations` - * 2024-06-02-preview: :class:`SnapshotsOperations` - * 2024-07-01: :class:`SnapshotsOperations` - """ - api_version = self._get_api_version('snapshots') - if api_version == '2021-08-01': - from ..v2021_08_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2021-09-01': - from ..v2021_09_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2021-10-01': - from ..v2021_10_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2021-11-01-preview': - from ..v2021_11_01_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-01-01': - from ..v2022_01_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-01-02-preview': - from ..v2022_01_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-02-01': - from ..v2022_02_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-02-02-preview': - from ..v2022_02_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-03-01': - from ..v2022_03_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-03-02-preview': - from 
..v2022_03_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-04-01': - from ..v2022_04_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-06-01': - from ..v2022_06_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-07-01': - from ..v2022_07_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-09-01': - from ..v2022_09_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-11-01': - from ..v2022_11_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-01-01': - from ..v2023_01_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version 
== '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-02-01': - from ..v2023_02_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-03-01': - from ..v2023_03_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-04-01': - from ..v2023_04_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-05-01': - from ..v2023_05_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-06-01': - from ..v2023_06_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-07-01': - from ..v2023_07_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-08-01': - from ..v2023_08_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version 
== '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-11-01': - from ..v2023_11_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-01-01': - from ..v2024_01_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-05-01': - from ..v2024_05_01.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import SnapshotsOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import SnapshotsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'snapshots'".format(api_version)) - 
self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client._send_request(request) + - @property - def trusted_access_role_bindings(self): - """Instance depends on the API version: + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - * 2022-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-07-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-08-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-08-03-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-09-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-10-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2022-11-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-01-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-02-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-03-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-07-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-08-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-09-01: :class:`TrustedAccessRoleBindingsOperations` - * 2023-09-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2023-10-01: :class:`TrustedAccessRoleBindingsOperations` - * 2023-10-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 
2023-11-01: :class:`TrustedAccessRoleBindingsOperations` - * 2023-11-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-01-01: :class:`TrustedAccessRoleBindingsOperations` - * 2024-01-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-02-01: :class:`TrustedAccessRoleBindingsOperations` - * 2024-02-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-03-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-04-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-05-01: :class:`TrustedAccessRoleBindingsOperations` - * 2024-05-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-06-02-preview: :class:`TrustedAccessRoleBindingsOperations` - * 2024-07-01: :class:`TrustedAccessRoleBindingsOperations` + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.AsyncHttpResponse """ - api_version = self._get_api_version('trusted_access_role_bindings') - if api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import 
TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-11-01': - from ..v2023_11_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-01-01': - from ..v2024_01_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations 
import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-05-01': - from ..v2024_05_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-06-02-preview': - from ..v2024_06_02_preview.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import TrustedAccessRoleBindingsOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'trusted_access_role_bindings'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - @property - def trusted_access_roles(self): - """Instance depends on the API version: + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - * 2022-04-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-05-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-06-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-07-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-08-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-08-03-preview: :class:`TrustedAccessRolesOperations` - * 2022-09-02-preview: :class:`TrustedAccessRolesOperations` - * 2022-10-02-preview: 
:class:`TrustedAccessRolesOperations` - * 2022-11-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-01-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-02-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-03-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-04-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-05-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-06-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-07-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-08-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-09-01: :class:`TrustedAccessRolesOperations` - * 2023-09-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-10-01: :class:`TrustedAccessRolesOperations` - * 2023-10-02-preview: :class:`TrustedAccessRolesOperations` - * 2023-11-01: :class:`TrustedAccessRolesOperations` - * 2023-11-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-01-01: :class:`TrustedAccessRolesOperations` - * 2024-01-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-02-01: :class:`TrustedAccessRolesOperations` - * 2024-02-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-03-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-04-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-05-01: :class:`TrustedAccessRolesOperations` - * 2024-05-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-06-02-preview: :class:`TrustedAccessRolesOperations` - * 2024-07-01: :class:`TrustedAccessRolesOperations` - """ - api_version = self._get_api_version('trusted_access_roles') - if api_version == '2022-04-02-preview': - from ..v2022_04_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-05-02-preview': - from ..v2022_05_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-06-02-preview': - from ..v2022_06_02_preview.aio.operations import 
TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-07-02-preview': - from ..v2022_07_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-08-02-preview': - from ..v2022_08_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-08-03-preview': - from ..v2022_08_03_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-09-02-preview': - from ..v2022_09_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-10-02-preview': - from ..v2022_10_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2022-11-02-preview': - from ..v2022_11_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-01-02-preview': - from ..v2023_01_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-02-02-preview': - from ..v2023_02_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-03-02-preview': - from ..v2023_03_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-04-02-preview': - from ..v2023_04_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-05-02-preview': - from ..v2023_05_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-06-02-preview': - from ..v2023_06_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-07-02-preview': - from ..v2023_07_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-08-02-preview': - from ..v2023_08_02_preview.aio.operations import 
TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-09-01': - from ..v2023_09_01.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-09-02-preview': - from ..v2023_09_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-10-01': - from ..v2023_10_01.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-10-02-preview': - from ..v2023_10_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-11-01': - from ..v2023_11_01.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2023-11-02-preview': - from ..v2023_11_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-01-01': - from ..v2024_01_01.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-01-02-preview': - from ..v2024_01_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-02-01': - from ..v2024_02_01.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-02-02-preview': - from ..v2024_02_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-03-02-preview': - from ..v2024_03_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-04-02-preview': - from ..v2024_04_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-05-01': - from ..v2024_05_01.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-05-02-preview': - from ..v2024_05_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-06-02-preview': - from 
..v2024_06_02_preview.aio.operations import TrustedAccessRolesOperations as OperationClass - elif api_version == '2024-07-01': - from ..v2024_07_01.aio.operations import TrustedAccessRolesOperations as OperationClass - else: - raise ValueError("API version {} does not have operation group 'trusted_access_roles'".format(api_version)) - self._config.api_version = api_version - return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version) - - async def close(self): + async def close(self) -> None: await self._client.close() - async def __aenter__(self): + + async def __aenter__(self) -> Self: await self._client.__aenter__() return self - async def __aexit__(self, *exc_details): + + async def __aexit__(self, *exc_details: Any) -> None: await self._client.__aexit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_patch.py similarity index 61% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_patch.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_patch.py index f7dd3251033..8bcb627aa47 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_patch.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/_patch.py @@ -1,7 +1,8 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/__init__.py similarity index 61% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/__init__.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/__init__.py index d59e9e8a9f1..70b2a677e7b 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/__init__.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/__init__.py @@ -5,27 +5,35 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position -from ._operations import Operations -from ._managed_clusters_operations import ManagedClustersOperations -from ._maintenance_configurations_operations import MaintenanceConfigurationsOperations -from ._agent_pools_operations import AgentPoolsOperations -from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations -from ._private_link_resources_operations import PrivateLinkResourcesOperations -from ._resolve_private_link_service_id_operations import ResolvePrivateLinkServiceIdOperations -from ._snapshots_operations import SnapshotsOperations -from ._trusted_access_role_bindings_operations import TrustedAccessRoleBindingsOperations -from ._trusted_access_roles_operations import TrustedAccessRolesOperations -from ._machines_operations import MachinesOperations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import Operations # type: ignore +from ._managed_clusters_operations import ManagedClustersOperations # type: ignore +from ._maintenance_configurations_operations import MaintenanceConfigurationsOperations # type: ignore +from ._managed_namespaces_operations import ManagedNamespacesOperations # type: ignore +from ._agent_pools_operations import AgentPoolsOperations # type: ignore +from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations # type: ignore +from ._private_link_resources_operations import PrivateLinkResourcesOperations # type: ignore +from ._resolve_private_link_service_id_operations import ResolvePrivateLinkServiceIdOperations # type: ignore +from ._snapshots_operations import SnapshotsOperations # type: ignore +from ._trusted_access_role_bindings_operations import TrustedAccessRoleBindingsOperations # type: ignore +from ._trusted_access_roles_operations import 
TrustedAccessRolesOperations # type: ignore +from ._machines_operations import MachinesOperations # type: ignore from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ "Operations", "ManagedClustersOperations", "MaintenanceConfigurationsOperations", + "ManagedNamespacesOperations", "AgentPoolsOperations", "PrivateEndpointConnectionsOperations", "PrivateLinkResourcesOperations", @@ -35,5 +43,5 @@ "TrustedAccessRolesOperations", "MachinesOperations", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_agent_pools_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_agent_pools_operations.py similarity index 85% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_agent_pools_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_agent_pools_operations.py index 509be147632..32e36defb7e 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_agent_pools_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_agent_pools_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +6,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. 
# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, Type, TypeVar, Union, cast, overload +from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -32,6 +33,7 @@ from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._agent_pools_operations import ( build_abort_latest_operation_request, build_create_or_update_request, @@ -43,13 +45,11 @@ build_list_request, build_upgrade_node_image_version_request, ) +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class AgentPoolsOperations: @@ -58,7 +58,7 @@ class AgentPoolsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`agent_pools` attribute. 
""" @@ -66,16 +66,15 @@ class AgentPoolsOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") async def _abort_latest_operation_initial( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -86,7 +85,7 @@ async def _abort_latest_operation_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_abort_latest_operation_request( @@ -114,7 +113,11 @@ async def _abort_latest_operation_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, 
error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -154,7 +157,7 @@ async def begin_abort_latest_operation( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -195,7 +198,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore @distributed_trace - def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterable["_models.AgentPool"]: + def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncItemPaged["_models.AgentPool"]: """Gets a list of agent pools in the specified managed cluster. Gets a list of agent pools in the specified managed cluster. @@ -206,17 +209,16 @@ def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> A :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: An iterator like instance of either AgentPool or the result of cls(response) - :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.AgentPoolListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -246,7 +248,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -272,7 +274,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -294,10 +300,10 @@ async def get( :param agent_pool_name: The name of the agent pool. Required. 
:type agent_pool_name: str :return: AgentPool or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool + :rtype: ~azure.mgmt.containerservice.models.AgentPool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -308,7 +314,7 @@ async def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None) _request = build_get_request( @@ -331,7 +337,11 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("AgentPool", pipeline_response.http_response) @@ -346,9 +356,11 @@ async def _create_or_update_initial( resource_name: str, agent_pool_name: str, parameters: Union[_models.AgentPool, IO[bytes]], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -359,7 +371,7 @@ async def _create_or_update_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or 
{}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) @@ -376,6 +388,8 @@ async def _create_or_update_initial( resource_name=resource_name, agent_pool_name=agent_pool_name, subscription_id=self._config.subscription_id, + if_match=if_match, + if_none_match=if_none_match, api_version=api_version, content_type=content_type, json=_json, @@ -399,7 +413,11 @@ async def _create_or_update_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) @@ -415,6 +433,8 @@ async def begin_create_or_update( resource_name: str, agent_pool_name: str, parameters: _models.AgentPool, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -431,14 +451,19 @@ async def begin_create_or_update( :param agent_pool_name: The name of the agent pool. Required. :type agent_pool_name: str :param parameters: The agent pool to create or update. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool + :type parameters: ~azure.mgmt.containerservice.models.AgentPool + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. 
+ :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. + :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -449,6 +474,8 @@ async def begin_create_or_update( resource_name: str, agent_pool_name: str, parameters: IO[bytes], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -466,13 +493,18 @@ async def begin_create_or_update( :type agent_pool_name: str :param parameters: The agent pool to create or update. Required. :type parameters: IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. + :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -483,6 +515,8 @@ async def begin_create_or_update( resource_name: str, agent_pool_name: str, parameters: Union[_models.AgentPool, IO[bytes]], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, **kwargs: Any ) -> AsyncLROPoller[_models.AgentPool]: """Creates or updates an agent pool in the specified managed cluster. @@ -498,17 +532,22 @@ async def begin_create_or_update( :type agent_pool_name: str :param parameters: The agent pool to create or update. Is either a AgentPool type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.AgentPool or IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. 
+ :type if_none_match: str :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) @@ -520,6 +559,8 @@ async def begin_create_or_update( resource_name=resource_name, agent_pool_name=agent_pool_name, parameters=parameters, + if_match=if_match, + if_none_match=if_none_match, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, @@ -554,9 +595,15 @@ def get_long_running_output(pipeline_response): ) async def _delete_initial( - self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + ignore_pod_disruption_budget: Optional[bool] = None, + if_match: Optional[str] = None, + **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -567,7 +614,7 @@ async def _delete_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_delete_request( @@ -575,6 +622,8 @@ async def _delete_initial( resource_name=resource_name, agent_pool_name=agent_pool_name, subscription_id=self._config.subscription_id, + ignore_pod_disruption_budget=ignore_pod_disruption_budget, + if_match=if_match, api_version=api_version, headers=_headers, params=_params, @@ -595,7 +644,11 @@ async def _delete_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -610,7 +663,13 @@ async def _delete_initial( @distributed_trace_async async def begin_delete( - self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + ignore_pod_disruption_budget: Optional[bool] = None, + if_match: Optional[str] = None, + **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes an agent pool in the specified managed cluster. @@ -623,6 +682,12 @@ async def begin_delete( :type resource_name: str :param agent_pool_name: The name of the agent pool. Required. :type agent_pool_name: str + :param ignore_pod_disruption_budget: ignore-pod-disruption-budget=true to delete those pods on + a node without considering Pod Disruption Budget. Default value is None. + :type ignore_pod_disruption_budget: bool + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. 
+ :type if_match: str :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: @@ -630,7 +695,7 @@ async def begin_delete( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -640,6 +705,8 @@ async def begin_delete( resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, + ignore_pod_disruption_budget=ignore_pod_disruption_budget, + if_match=if_match, api_version=api_version, cls=lambda x, y, z: x, headers=_headers, @@ -684,10 +751,10 @@ async def get_upgrade_profile( :param agent_pool_name: The name of the agent pool. Required. 
:type agent_pool_name: str :return: AgentPoolUpgradeProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeProfile + :rtype: ~azure.mgmt.containerservice.models.AgentPoolUpgradeProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -698,7 +765,7 @@ async def get_upgrade_profile( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.AgentPoolUpgradeProfile] = kwargs.pop("cls", None) _request = build_get_upgrade_profile_request( @@ -721,7 +788,11 @@ async def get_upgrade_profile( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("AgentPoolUpgradeProfile", pipeline_response.http_response) @@ -738,7 +809,7 @@ async def _delete_machines_initial( machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]], **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -749,7 +820,7 @@ async def _delete_machines_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) @@ -789,7 +860,10 @@ async def _delete_machines_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} @@ -825,8 +899,7 @@ async def begin_delete_machines( :param agent_pool_name: The name of the agent pool. Required. :type agent_pool_name: str :param machines: A list of machines from the agent pool to be deleted. Required. - :type machines: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolDeleteMachinesParameter + :type machines: ~azure.mgmt.containerservice.models.AgentPoolDeleteMachinesParameter :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -889,8 +962,8 @@ async def begin_delete_machines( :type agent_pool_name: str :param machines: A list of machines from the agent pool to be deleted. Is either a AgentPoolDeleteMachinesParameter type or a IO[bytes] type. Required. 
- :type machines: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolDeleteMachinesParameter or IO[bytes] + :type machines: ~azure.mgmt.containerservice.models.AgentPoolDeleteMachinesParameter or + IO[bytes] :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: @@ -898,7 +971,7 @@ async def begin_delete_machines( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) @@ -955,10 +1028,10 @@ async def get_available_agent_pool_versions( :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: AgentPoolAvailableVersions or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolAvailableVersions + :rtype: ~azure.mgmt.containerservice.models.AgentPoolAvailableVersions :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -969,7 +1042,7 @@ async def get_available_agent_pool_versions( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.AgentPoolAvailableVersions] = kwargs.pop("cls", None) _request = build_get_available_agent_pool_versions_request( @@ -991,7 +1064,11 @@ async def get_available_agent_pool_versions( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("AgentPoolAvailableVersions", pipeline_response.http_response) @@ -1003,7 +1080,7 @@ async def get_available_agent_pool_versions( async def _upgrade_node_image_version_initial( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1014,7 +1091,7 @@ async def 
_upgrade_node_image_version_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_upgrade_node_image_version_request( @@ -1042,7 +1119,11 @@ async def _upgrade_node_image_version_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -1076,14 +1157,13 @@ async def begin_upgrade_node_image_version( :type agent_pool_name: str :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) diff --git 
a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_machines_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_machines_operations.py similarity index 81% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_machines_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_machines_operations.py index d52176e2962..ed5c1c39eb3 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_machines_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_machines_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,10 +5,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -import sys -from typing import Any, AsyncIterable, Callable, Dict, Optional, Type, TypeVar +from collections.abc import MutableMapping +from typing import Any, Callable, Optional, TypeVar import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -27,14 +27,13 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from ... 
import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._machines_operations import build_get_request, build_list_request +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class MachinesOperations: @@ -43,7 +42,7 @@ class MachinesOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`machines` attribute. 
""" @@ -51,16 +50,15 @@ class MachinesOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any - ) -> AsyncIterable["_models.Machine"]: + ) -> AsyncItemPaged["_models.Machine"]: """Gets a list of machines in the specified agent pool. Gets a list of machines in the specified agent pool. @@ -73,17 +71,16 @@ def list( :param agent_pool_name: The name of the agent pool. Required. 
:type agent_pool_name: str :return: An iterator like instance of either Machine or the result of cls(response) - :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Machine] + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.Machine] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MachineListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -114,7 +111,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -140,7 +137,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -164,10 +165,10 @@ async def get( :param machine_name: host name of the machine. Required. 
:type machine_name: str :return: Machine or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Machine + :rtype: ~azure.mgmt.containerservice.models.Machine :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -178,7 +179,7 @@ async def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.Machine] = kwargs.pop("cls", None) _request = build_get_request( @@ -202,7 +203,11 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("Machine", pipeline_response.http_response) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_maintenance_configurations_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_maintenance_configurations_operations.py similarity index 81% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_maintenance_configurations_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_maintenance_configurations_operations.py 
index aab6a21a3cc..e11d52fc87f 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_maintenance_configurations_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_maintenance_configurations_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +5,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload +from typing import Any, Callable, IO, Optional, TypeVar, Union, overload import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -28,19 +28,18 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from ... 
import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._maintenance_configurations_operations import ( build_create_or_update_request, build_delete_request, build_get_request, build_list_by_managed_cluster_request, ) +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class MaintenanceConfigurationsOperations: @@ -49,7 +48,7 @@ class MaintenanceConfigurationsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`maintenance_configurations` attribute. 
""" @@ -57,16 +56,15 @@ class MaintenanceConfigurationsOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list_by_managed_cluster( self, resource_group_name: str, resource_name: str, **kwargs: Any - ) -> AsyncIterable["_models.MaintenanceConfiguration"]: + ) -> AsyncItemPaged["_models.MaintenanceConfiguration"]: """Gets a list of maintenance configurations in the specified managed cluster. Gets a list of maintenance configurations in the specified managed cluster. 
@@ -79,16 +77,16 @@ def list_by_managed_cluster( :return: An iterator like instance of either MaintenanceConfiguration or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration] + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.MaintenanceConfiguration] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MaintenanceConfigurationListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -118,7 +116,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -144,7 +142,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -163,13 +165,14 @@ async def get( :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str - :param config_name: The name of the maintenance configuration. Required. + :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. :type config_name: str :return: MaintenanceConfiguration or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -180,7 +183,7 @@ async def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None) _request = build_get_request( @@ -203,7 +206,11 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response) @@ -232,15 +239,16 @@ async def create_or_update( :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str - :param config_name: The name of the maintenance configuration. Required. 
+ :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. :type config_name: str :param parameters: The maintenance configuration to create or update. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :type parameters: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: MaintenanceConfiguration or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :raises ~azure.core.exceptions.HttpResponseError: """ @@ -264,7 +272,8 @@ async def create_or_update( :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str - :param config_name: The name of the maintenance configuration. Required. + :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. :type config_name: str :param parameters: The maintenance configuration to create or update. Required. :type parameters: IO[bytes] @@ -272,7 +281,7 @@ async def create_or_update( Default value is "application/json". :paramtype content_type: str :return: MaintenanceConfiguration or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :raises ~azure.core.exceptions.HttpResponseError: """ @@ -294,17 +303,17 @@ async def create_or_update( :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str - :param config_name: The name of the maintenance configuration. Required. + :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. :type config_name: str :param parameters: The maintenance configuration to create or update. Is either a MaintenanceConfiguration type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration or - IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.MaintenanceConfiguration or IO[bytes] :return: MaintenanceConfiguration or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -315,7 +324,7 @@ async def create_or_update( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None) @@ -350,7 +359,11 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + 
pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response) @@ -360,9 +373,7 @@ async def create_or_update( return deserialized # type: ignore @distributed_trace_async - async def delete( # pylint: disable=inconsistent-return-statements - self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any - ) -> None: + async def delete(self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any) -> None: """Deletes a maintenance configuration. Deletes a maintenance configuration. @@ -372,13 +383,14 @@ async def delete( # pylint: disable=inconsistent-return-statements :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str - :param config_name: The name of the maintenance configuration. Required. + :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. 
:type config_name: str :return: None or the result of cls(response) :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -389,7 +401,7 @@ async def delete( # pylint: disable=inconsistent-return-statements _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) _request = build_delete_request( @@ -412,7 +424,11 @@ async def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_managed_clusters_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_managed_clusters_operations.py similarity index 87% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_managed_clusters_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_managed_clusters_operations.py index 1b6b663949f..69e06849da8 100644 --- 
a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_managed_clusters_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_managed_clusters_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +6,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, Type, TypeVar, Union, cast, overload +from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -32,6 +33,7 @@ from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... 
import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._managed_clusters_operations import ( build_abort_latest_operation_request, build_create_or_update_request, @@ -60,13 +62,11 @@ build_stop_request, build_update_tags_request, ) +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class ManagedClustersOperations: # pylint: disable=too-many-public-methods @@ -75,7 +75,7 @@ class ManagedClustersOperations: # pylint: disable=too-many-public-methods **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`managed_clusters` attribute. 
""" @@ -83,11 +83,10 @@ class ManagedClustersOperations: # pylint: disable=too-many-public-methods def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.KubernetesVersionListResult: @@ -99,10 +98,10 @@ async def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _model :param location: The name of the Azure region. Required. 
:type location: str :return: KubernetesVersionListResult or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersionListResult + :rtype: ~azure.mgmt.containerservice.models.KubernetesVersionListResult :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -113,7 +112,7 @@ async def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _model _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.KubernetesVersionListResult] = kwargs.pop("cls", None) _request = build_list_kubernetes_versions_request( @@ -134,7 +133,11 @@ async def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _model if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("KubernetesVersionListResult", pipeline_response.http_response) @@ -144,23 +147,23 @@ async def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _model return deserialized # type: ignore @distributed_trace - def list(self, **kwargs: Any) -> AsyncIterable["_models.ManagedCluster"]: + def list(self, **kwargs: Any) -> AsyncItemPaged["_models.ManagedCluster"]: """Gets a list of managed clusters in the specified subscription. 
Gets a list of managed clusters in the specified subscription. :return: An iterator like instance of either ManagedCluster or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -188,7 +191,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -214,7 +217,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -223,7 +230,7 @@ async def get_next(next_link=None): @distributed_trace def list_by_resource_group( self, resource_group_name: str, **kwargs: Any - ) -> 
AsyncIterable["_models.ManagedCluster"]: + ) -> AsyncItemPaged["_models.ManagedCluster"]: """Lists managed clusters in the specified subscription and resource group. Lists managed clusters in the specified subscription and resource group. @@ -233,16 +240,16 @@ def list_by_resource_group( :type resource_group_name: str :return: An iterator like instance of either ManagedCluster or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -271,7 +278,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -297,7 +304,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -317,10 +328,10 @@ async def get_upgrade_profile( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :return: ManagedClusterUpgradeProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterUpgradeProfile + :rtype: ~azure.mgmt.containerservice.models.ManagedClusterUpgradeProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -331,7 +342,7 @@ async def get_upgrade_profile( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedClusterUpgradeProfile] = kwargs.pop("cls", None) _request = build_get_upgrade_profile_request( @@ -353,7 +364,11 @@ async def get_upgrade_profile( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("ManagedClusterUpgradeProfile", pipeline_response.http_response) @@ -381,10 +396,10 @@ async def get_access_profile( :param role_name: The name of the role for managed cluster accessProfile resource. Required. 
:type role_name: str :return: ManagedClusterAccessProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAccessProfile + :rtype: ~azure.mgmt.containerservice.models.ManagedClusterAccessProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -395,7 +410,7 @@ async def get_access_profile( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedClusterAccessProfile] = kwargs.pop("cls", None) _request = build_get_access_profile_request( @@ -418,7 +433,11 @@ async def get_access_profile( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("ManagedClusterAccessProfile", pipeline_response.http_response) @@ -443,10 +462,10 @@ async def list_cluster_admin_credentials( :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. 
:type server_fqdn: str :return: CredentialResults or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :rtype: ~azure.mgmt.containerservice.models.CredentialResults :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -457,7 +476,7 @@ async def list_cluster_admin_credentials( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) _request = build_list_cluster_admin_credentials_request( @@ -480,7 +499,11 @@ async def list_cluster_admin_credentials( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) @@ -513,12 +536,12 @@ async def list_cluster_user_credentials( 'azure' will return azure auth-provider kubeconfig; format 'exec' will return exec format kubeconfig, which requires kubelogin binary in the path. Known values are: "azure", "exec", and "exec". Default value is None. 
- :type format: str or ~azure.mgmt.containerservice.v2024_07_01.models.Format + :type format: str or ~azure.mgmt.containerservice.models.Format :return: CredentialResults or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :rtype: ~azure.mgmt.containerservice.models.CredentialResults :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -529,7 +552,7 @@ async def list_cluster_user_credentials( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) _request = build_list_cluster_user_credentials_request( @@ -553,7 +576,11 @@ async def list_cluster_user_credentials( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) @@ -578,10 +605,10 @@ async def list_cluster_monitoring_user_credentials( :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. 
:type server_fqdn: str :return: CredentialResults or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :rtype: ~azure.mgmt.containerservice.models.CredentialResults :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -592,7 +619,7 @@ async def list_cluster_monitoring_user_credentials( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) _request = build_list_cluster_monitoring_user_credentials_request( @@ -615,7 +642,11 @@ async def list_cluster_monitoring_user_credentials( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) @@ -636,10 +667,10 @@ async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: ManagedCluster or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster + :rtype: ~azure.mgmt.containerservice.models.ManagedCluster :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -650,7 +681,7 @@ async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) _request = build_get_request( @@ -672,7 +703,11 @@ async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response) @@ -686,9 +721,11 @@ async def _create_or_update_initial( resource_group_name: str, resource_name: str, parameters: Union[_models.ManagedCluster, IO[bytes]], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -699,7 +736,7 
@@ async def _create_or_update_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) @@ -715,6 +752,8 @@ async def _create_or_update_initial( resource_group_name=resource_group_name, resource_name=resource_name, subscription_id=self._config.subscription_id, + if_match=if_match, + if_none_match=if_none_match, api_version=api_version, content_type=content_type, json=_json, @@ -738,7 +777,11 @@ async def _create_or_update_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) @@ -753,6 +796,8 @@ async def begin_create_or_update( resource_group_name: str, resource_name: str, parameters: _models.ManagedCluster, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -767,14 +812,19 @@ async def begin_create_or_update( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: The managed cluster to create or update. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster + :type parameters: ~azure.mgmt.containerservice.models.ManagedCluster + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. + :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -784,6 +834,8 @@ async def begin_create_or_update( resource_group_name: str, resource_name: str, parameters: IO[bytes], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -799,13 +851,18 @@ async def begin_create_or_update( :type resource_name: str :param parameters: The managed cluster to create or update. Required. :type parameters: IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. + :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -815,6 +872,8 @@ async def begin_create_or_update( resource_group_name: str, resource_name: str, parameters: Union[_models.ManagedCluster, IO[bytes]], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, **kwargs: Any ) -> AsyncLROPoller[_models.ManagedCluster]: """Creates or updates a managed cluster. @@ -828,17 +887,22 @@ async def begin_create_or_update( :type resource_name: str :param parameters: The managed cluster to create or update. Is either a ManagedCluster type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.ManagedCluster or IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. 
+ :type if_none_match: str :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) @@ -849,6 +913,8 @@ async def begin_create_or_update( resource_group_name=resource_group_name, resource_name=resource_name, parameters=parameters, + if_match=if_match, + if_none_match=if_none_match, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, @@ -887,9 +953,10 @@ async def _update_tags_initial( resource_group_name: str, resource_name: str, parameters: Union[_models.TagsObject, IO[bytes]], + if_match: Optional[str] = None, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -900,7 +967,7 @@ async def _update_tags_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) @@ -916,6 +983,7 @@ async def _update_tags_initial( resource_group_name=resource_group_name, resource_name=resource_name, subscription_id=self._config.subscription_id, + if_match=if_match, api_version=api_version, content_type=content_type, json=_json, @@ -939,7 +1007,11 @@ async def _update_tags_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) @@ -954,6 +1026,7 @@ async def begin_update_tags( resource_group_name: str, resource_name: str, parameters: _models.TagsObject, + if_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -968,14 +1041,16 @@ async def begin_update_tags( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject + :type parameters: ~azure.mgmt.containerservice.models.TagsObject + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -985,6 +1060,7 @@ async def begin_update_tags( resource_group_name: str, resource_name: str, parameters: IO[bytes], + if_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -1000,13 +1076,15 @@ async def begin_update_tags( :type resource_name: str :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required. :type parameters: IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1016,6 +1094,7 @@ async def begin_update_tags( resource_group_name: str, resource_name: str, parameters: Union[_models.TagsObject, IO[bytes]], + if_match: Optional[str] = None, **kwargs: Any ) -> AsyncLROPoller[_models.ManagedCluster]: """Updates tags on a managed cluster. @@ -1029,17 +1108,19 @@ async def begin_update_tags( :type resource_name: str :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Is either a TagsObject type or a IO[bytes] type. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) @@ -1050,6 +1131,7 @@ async def begin_update_tags( resource_group_name=resource_group_name, resource_name=resource_name, parameters=parameters, + if_match=if_match, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, @@ -1084,9 +1166,9 @@ def get_long_running_output(pipeline_response): ) async def _delete_initial( - self, resource_group_name: str, resource_name: str, **kwargs: Any + self, resource_group_name: str, resource_name: str, if_match: Optional[str] = None, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1097,13 +1179,14 @@ async def 
_delete_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_delete_request( resource_group_name=resource_group_name, resource_name=resource_name, subscription_id=self._config.subscription_id, + if_match=if_match, api_version=api_version, headers=_headers, params=_params, @@ -1124,7 +1207,11 @@ async def _delete_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -1138,7 +1225,9 @@ async def _delete_initial( return deserialized # type: ignore @distributed_trace_async - async def begin_delete(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncLROPoller[None]: + async def begin_delete( + self, resource_group_name: str, resource_name: str, if_match: Optional[str] = None, **kwargs: Any + ) -> AsyncLROPoller[None]: """Deletes a managed cluster. Deletes a managed cluster. @@ -1148,6 +1237,9 @@ async def begin_delete(self, resource_group_name: str, resource_name: str, **kwa :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. 
+ :type if_match: str :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: @@ -1155,7 +1247,7 @@ async def begin_delete(self, resource_group_name: str, resource_name: str, **kwa _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -1164,6 +1256,7 @@ async def begin_delete(self, resource_group_name: str, resource_name: str, **kwa raw_result = await self._delete_initial( resource_group_name=resource_group_name, resource_name=resource_name, + if_match=if_match, api_version=api_version, cls=lambda x, y, z: x, headers=_headers, @@ -1199,7 +1292,7 @@ async def _reset_service_principal_profile_initial( parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]], **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1210,7 +1303,7 @@ async def _reset_service_principal_profile_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", 
_headers.pop("Content-Type", None)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) @@ -1249,7 +1342,11 @@ async def _reset_service_principal_profile_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -1282,8 +1379,7 @@ async def begin_reset_service_principal_profile( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: The service principal profile to set on the managed cluster. Required. - :type parameters: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile + :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1340,8 +1436,7 @@ async def begin_reset_service_principal_profile( :type resource_name: str :param parameters: The service principal profile to set on the managed cluster. Is either a ManagedClusterServicePrincipalProfile type or a IO[bytes] type. Required. 
- :type parameters: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile or + :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile or IO[bytes] :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] @@ -1350,7 +1445,7 @@ async def begin_reset_service_principal_profile( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) @@ -1399,7 +1494,7 @@ async def _reset_aad_profile_initial( parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]], **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1410,7 +1505,7 @@ async def _reset_aad_profile_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) @@ -1449,7 +1544,11 @@ async def _reset_aad_profile_initial( except (StreamConsumedError, StreamClosedError): pass 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -1484,7 +1583,7 @@ async def begin_reset_aad_profile( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: The AAD profile to set on the Managed Cluster. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile + :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1545,8 +1644,7 @@ async def begin_reset_aad_profile( :type resource_name: str :param parameters: The AAD profile to set on the Managed Cluster. Is either a ManagedClusterAADProfile type or a IO[bytes] type. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile or - IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile or IO[bytes] :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: @@ -1554,7 +1652,7 @@ async def begin_reset_aad_profile( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) @@ -1599,7 +1697,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- async def _rotate_cluster_certificates_initial( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1610,7 +1708,7 @@ async def _rotate_cluster_certificates_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_rotate_cluster_certificates_request( @@ -1637,7 +1735,11 @@ async def _rotate_cluster_certificates_initial( 
except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -1671,7 +1773,7 @@ async def begin_rotate_cluster_certificates( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -1713,7 +1815,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- async def _abort_latest_operation_initial( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1724,7 +1826,7 @@ async def _abort_latest_operation_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_abort_latest_operation_request( @@ -1751,7 +1853,11 @@ async def 
_abort_latest_operation_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -1789,7 +1895,7 @@ async def begin_abort_latest_operation( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -1831,7 +1937,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- async def _rotate_service_account_signing_keys_initial( # pylint: disable=name-too-long self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1842,7 +1948,7 @@ async def _rotate_service_account_signing_keys_initial( # pylint: disable=name- _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) 
_request = build_rotate_service_account_signing_keys_request( @@ -1869,7 +1975,11 @@ async def _rotate_service_account_signing_keys_initial( # pylint: disable=name- except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -1902,7 +2012,7 @@ async def begin_rotate_service_account_signing_keys( # pylint: disable=name-too _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -1942,7 +2052,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore async def _stop_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1953,7 +2063,7 @@ async def _stop_initial(self, resource_group_name: str, resource_name: str, **kw _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_stop_request( @@ -1980,7 +2090,11 @@ async def _stop_initial(self, resource_group_name: str, resource_name: str, **kw except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -2015,7 +2129,7 @@ async def begin_stop(self, resource_group_name: str, resource_name: str, **kwarg _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -2055,7 +2169,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore async def _start_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2066,7 +2180,7 @@ async def _start_initial(self, 
resource_group_name: str, resource_name: str, **k _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_start_request( @@ -2093,7 +2207,11 @@ async def _start_initial(self, resource_group_name: str, resource_name: str, **k except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -2125,7 +2243,7 @@ async def begin_start(self, resource_group_name: str, resource_name: str, **kwar _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -2171,7 +2289,7 @@ async def _run_command_initial( request_payload: Union[_models.RunCommandRequest, IO[bytes]], **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2182,7 +2300,7 @@ async def 
_run_command_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) @@ -2221,7 +2339,11 @@ async def _run_command_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -2256,14 +2378,14 @@ async def begin_run_command( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param request_payload: The run command request. Required. - :type request_payload: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandRequest + :type request_payload: ~azure.mgmt.containerservice.models.RunCommandRequest :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of cls(response) :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.RunCommandResult] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2296,7 +2418,7 @@ async def begin_run_command( :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of cls(response) :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.RunCommandResult] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2321,18 +2443,17 @@ async def begin_run_command( :type resource_name: str :param request_payload: The run command request. Is either a RunCommandRequest type or a IO[bytes] type. Required. - :type request_payload: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandRequest or - IO[bytes] + :type request_payload: ~azure.mgmt.containerservice.models.RunCommandRequest or IO[bytes] :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of cls(response) :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.RunCommandResult] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: 
ClsType[_models.RunCommandResult] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) @@ -2394,10 +2515,10 @@ async def get_command_result( :param command_id: Id of the command. Required. :type command_id: str :return: RunCommandResult or None or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult or None + :rtype: ~azure.mgmt.containerservice.models.RunCommandResult or None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2408,7 +2529,7 @@ async def get_command_result( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Optional[_models.RunCommandResult]] = kwargs.pop("cls", None) _request = build_get_command_result_request( @@ -2431,7 +2552,11 @@ async def get_command_result( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None response_headers = {} @@ -2449,7 +2574,7 @@ async def get_command_result( @distributed_trace def list_outbound_network_dependencies_endpoints( # pylint: disable=name-too-long self, resource_group_name: str, resource_name: str, **kwargs: Any - ) -> AsyncIterable["_models.OutboundEnvironmentEndpoint"]: + ) -> 
AsyncItemPaged["_models.OutboundEnvironmentEndpoint"]: """Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the specified managed cluster. @@ -2464,16 +2589,16 @@ def list_outbound_network_dependencies_endpoints( # pylint: disable=name-too-lo :return: An iterator like instance of either OutboundEnvironmentEndpoint or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.OutboundEnvironmentEndpoint] + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.OutboundEnvironmentEndpoint] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.OutboundEnvironmentEndpointCollection] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2503,7 +2628,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -2529,14 +2654,20 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data) @distributed_trace - def list_mesh_revision_profiles(self, location: str, **kwargs: Any) -> AsyncIterable["_models.MeshRevisionProfile"]: + def list_mesh_revision_profiles( + self, location: str, **kwargs: Any + ) -> AsyncItemPaged["_models.MeshRevisionProfile"]: """Lists mesh revision profiles for all meshes in the specified location. Contains extra metadata on each revision, including supported revisions, cluster compatibility @@ -2546,16 +2677,16 @@ def list_mesh_revision_profiles(self, location: str, **kwargs: Any) -> AsyncIter :type location: str :return: An iterator like instance of either MeshRevisionProfile or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile] + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.MeshRevisionProfile] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MeshRevisionProfileList] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2584,7 +2715,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), 
params=_next_request_params ) @@ -2610,7 +2741,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -2628,10 +2763,10 @@ async def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: An :param mode: The mode of the mesh. Required. :type mode: str :return: MeshRevisionProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile + :rtype: ~azure.mgmt.containerservice.models.MeshRevisionProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2642,7 +2777,7 @@ async def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: An _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MeshRevisionProfile] = kwargs.pop("cls", None) _request = build_get_mesh_revision_profile_request( @@ -2664,7 +2799,11 @@ async def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: An if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + 
_models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("MeshRevisionProfile", pipeline_response.http_response) @@ -2676,7 +2815,7 @@ async def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: An @distributed_trace def list_mesh_upgrade_profiles( self, resource_group_name: str, resource_name: str, **kwargs: Any - ) -> AsyncIterable["_models.MeshUpgradeProfile"]: + ) -> AsyncItemPaged["_models.MeshUpgradeProfile"]: """Lists available upgrades for all service meshes in a specific cluster. Lists available upgrades for all service meshes in a specific cluster. @@ -2688,16 +2827,16 @@ def list_mesh_upgrade_profiles( :type resource_name: str :return: An iterator like instance of either MeshUpgradeProfile or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile] + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.MeshUpgradeProfile] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MeshUpgradeProfileList] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2727,7 +2866,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", 
urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -2753,7 +2892,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -2775,10 +2918,10 @@ async def get_mesh_upgrade_profile( :param mode: The mode of the mesh. Required. :type mode: str :return: MeshUpgradeProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile + :rtype: ~azure.mgmt.containerservice.models.MeshUpgradeProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2789,7 +2932,7 @@ async def get_mesh_upgrade_profile( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MeshUpgradeProfile] = kwargs.pop("cls", None) _request = build_get_mesh_upgrade_profile_request( @@ -2812,7 +2955,11 @@ async def get_mesh_upgrade_profile( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("MeshUpgradeProfile", pipeline_response.http_response) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_managed_namespaces_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_managed_namespaces_operations.py new file mode 100644 index 00000000000..ecd1869246e --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_managed_namespaces_operations.py @@ -0,0 +1,809 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from collections.abc import MutableMapping +from io import IOBase +from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core import AsyncPipelineClient +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... import models as _models +from ..._utils.serialization import Deserializer, Serializer +from ...operations._managed_namespaces_operations import ( + build_create_or_update_request, + build_delete_request, + build_get_request, + build_list_by_managed_cluster_request, + build_list_credential_request, + build_update_request, +) +from .._configuration import ContainerServiceClientConfiguration + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list + + +class ManagedNamespacesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s + :attr:`managed_namespaces` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list_by_managed_cluster( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> AsyncItemPaged["_models.ManagedNamespace"]: + """Gets a list of managed namespaces in the specified managed cluster. + + Gets a list of managed namespaces in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. 
+ :type resource_name: str + :return: An iterator like instance of either ManagedNamespace or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.ManagedNamespace] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[_models.ManagedNamespaceListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_by_managed_cluster_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("ManagedNamespaceListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return 
deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get( + self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any + ) -> _models.ManagedNamespace: + """Gets the specified namespace of a managed cluster. + + Gets the specified namespace of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. 
+ :type managed_namespace_name: str + :return: ManagedNamespace or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: Union[_models.ManagedNamespace, IO[bytes]], + **kwargs: Any + ) -> 
AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "ManagedNamespace") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + 
response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: _models.ManagedNamespace, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.ManagedNamespace]: + """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: The namespace to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.models.ManagedNamespace + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either ManagedNamespace or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedNamespace] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.ManagedNamespace]: + """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: The namespace to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns either ManagedNamespace or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedNamespace] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: Union[_models.ManagedNamespace, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.ManagedNamespace]: + """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: The namespace to create or update. Is either a ManagedNamespace type or a + IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedNamespace or IO[bytes] + :return: An instance of AsyncLROPoller that returns either ManagedNamespace or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedNamespace] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif 
polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.ManagedNamespace].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.ManagedNamespace]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _delete_initial( + self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + if response.status_code == 204: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes a namespace. + + Deletes a namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. 
+ :type managed_namespace_name: str + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @overload + async def update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: 
_models.TagsObject, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ManagedNamespace: + """Updates tags on a managed namespace. + + Updates tags on a managed namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: Parameters supplied to the patch namespace operation, we only support patch + tags for now. Required. + :type parameters: ~azure.mgmt.containerservice.models.TagsObject + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ManagedNamespace or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ManagedNamespace: + """Updates tags on a managed namespace. + + Updates tags on a managed namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: Parameters supplied to the patch namespace operation, we only support patch + tags for now. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ManagedNamespace or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: Union[_models.TagsObject, IO[bytes]], + **kwargs: Any + ) -> _models.ManagedNamespace: + """Updates tags on a managed namespace. + + Updates tags on a managed namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: Parameters supplied to the patch namespace operation, we only support patch + tags for now. Is either a TagsObject type or a IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes] + :return: ManagedNamespace or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "TagsObject") + + _request = build_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, 
error_format=ARMErrorFormat) + + deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_credential( + self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any + ) -> _models.CredentialResults: + """Lists the credentials of a namespace. + + Lists the credentials of a namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :return: CredentialResults or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.CredentialResults + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) + + _request = build_list_credential_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: 
disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_operations.py similarity index 76% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_operations.py index c8177ce87bf..6277992c37d 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,10 +5,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -import sys -from typing import Any, AsyncIterable, Callable, Dict, Optional, Type, TypeVar +from collections.abc import MutableMapping +from typing import Any, Callable, Optional, TypeVar import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -26,14 +26,13 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._operations import build_list_request +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class Operations: @@ -42,7 +41,7 @@ class Operations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`operations` attribute. 
""" @@ -50,30 +49,29 @@ class Operations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list(self, **kwargs: Any) -> AsyncIterable["_models.OperationValue"]: + def list(self, **kwargs: Any) -> AsyncItemPaged["_models.OperationValue"]: """Gets a list of operations. Gets a list of operations. 
:return: An iterator like instance of either OperationValue or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.OperationValue] + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.OperationValue] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -100,7 +98,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -126,7 +124,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_patch.py similarity 
index 61% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_patch.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_patch.py index f7dd3251033..8bcb627aa47 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_patch.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_patch.py @@ -1,7 +1,8 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_endpoint_connections_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_private_endpoint_connections_operations.py similarity index 86% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_endpoint_connections_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_private_endpoint_connections_operations.py index 2058d08a3f5..832f404c47e 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_endpoint_connections_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_private_endpoint_connections_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,10 +5,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, AsyncIterator, Callable, Dict, IO, Optional, Type, TypeVar, Union, cast, overload +from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload +from azure.core import AsyncPipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -29,19 +29,18 @@ from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._private_endpoint_connections_operations import ( build_delete_request, build_get_request, build_list_request, build_update_request, ) +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class PrivateEndpointConnectionsOperations: @@ -50,7 +49,7 @@ class PrivateEndpointConnectionsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`private_endpoint_connections` attribute. 
""" @@ -58,11 +57,10 @@ class PrivateEndpointConnectionsOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def list( @@ -79,10 +77,10 @@ async def list( :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: PrivateEndpointConnectionListResult or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnectionListResult + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnectionListResult :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -93,7 +91,7 @@ async def list( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None) _request = build_list_request( @@ -115,7 +113,11 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response.http_response) @@ -141,10 +143,10 @@ async def get( :param private_endpoint_connection_name: The name of the private endpoint connection. Required. 
:type private_endpoint_connection_name: str :return: PrivateEndpointConnection or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -155,7 +157,7 @@ async def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) _request = build_get_request( @@ -178,7 +180,11 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response) @@ -210,12 +216,12 @@ async def update( :param private_endpoint_connection_name: The name of the private endpoint connection. Required. :type private_endpoint_connection_name: str :param parameters: The updated private endpoint connection. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :type parameters: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
Default value is "application/json". :paramtype content_type: str :return: PrivateEndpointConnection or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :raises ~azure.core.exceptions.HttpResponseError: """ @@ -247,7 +253,7 @@ async def update( Default value is "application/json". :paramtype content_type: str :return: PrivateEndpointConnection or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :raises ~azure.core.exceptions.HttpResponseError: """ @@ -273,13 +279,12 @@ async def update( :type private_endpoint_connection_name: str :param parameters: The updated private endpoint connection. Is either a PrivateEndpointConnection type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection or - IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.PrivateEndpointConnection or IO[bytes] :return: PrivateEndpointConnection or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -290,7 +295,7 @@ async def update( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: 
Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) @@ -325,7 +330,11 @@ async def update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response) @@ -337,7 +346,7 @@ async def update( async def _delete_initial( self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -348,7 +357,7 @@ async def _delete_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_delete_request( @@ -376,7 +385,11 @@ async def _delete_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = 
response.stream_download(self._client._pipeline, decompress=_decompress) @@ -407,7 +420,7 @@ async def begin_delete( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_link_resources_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_private_link_resources_operations.py similarity index 75% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_link_resources_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_private_link_resources_operations.py index 91035e80000..75359f8ffed 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_private_link_resources_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_private_link_resources_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,9 +5,10 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -import sys -from typing import Any, Callable, Dict, Optional, Type, TypeVar +from collections.abc import MutableMapping +from typing import Any, Callable, Optional, TypeVar +from azure.core import AsyncPipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -24,14 +24,13 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._private_link_resources_operations import build_list_request +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class PrivateLinkResourcesOperations: @@ -40,7 +39,7 @@ class PrivateLinkResourcesOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`private_link_resources` attribute. 
""" @@ -48,11 +47,10 @@ class PrivateLinkResourcesOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def list( @@ -69,10 +67,10 @@ async def list( :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: PrivateLinkResourcesListResult or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResourcesListResult + :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResourcesListResult :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -83,7 +81,7 @@ async def list( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None) _request = build_list_request( @@ -105,7 +103,11 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response.http_response) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_resolve_private_link_service_id_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_resolve_private_link_service_id_operations.py similarity index 81% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_resolve_private_link_service_id_operations.py rename to 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_resolve_private_link_service_id_operations.py index 393a167f652..e173bbbc3ef 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_resolve_private_link_service_id_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_resolve_private_link_service_id_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,10 +5,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload +from typing import Any, Callable, IO, Optional, TypeVar, Union, overload +from azure.core import AsyncPipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -25,14 +25,13 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from ... 
import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._resolve_private_link_service_id_operations import build_post_request +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class ResolvePrivateLinkServiceIdOperations: @@ -41,7 +40,7 @@ class ResolvePrivateLinkServiceIdOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`resolve_private_link_service_id` attribute. 
""" @@ -49,11 +48,10 @@ class ResolvePrivateLinkServiceIdOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload async def post( @@ -75,12 +73,12 @@ async def post( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: Parameters required in order to resolve a private link service ID. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :type parameters: ~azure.mgmt.containerservice.models.PrivateLinkResource :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: PrivateLinkResource or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource :raises ~azure.core.exceptions.HttpResponseError: """ @@ -109,7 +107,7 @@ async def post( Default value is "application/json". 
:paramtype content_type: str :return: PrivateLinkResource or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource :raises ~azure.core.exceptions.HttpResponseError: """ @@ -132,13 +130,12 @@ async def post( :type resource_name: str :param parameters: Parameters required in order to resolve a private link service ID. Is either a PrivateLinkResource type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource or - IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.PrivateLinkResource or IO[bytes] :return: PrivateLinkResource or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -149,7 +146,7 @@ async def post( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.PrivateLinkResource] = kwargs.pop("cls", None) @@ -183,7 +180,11 @@ async def post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + 
pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateLinkResource", pipeline_response.http_response) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_snapshots_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_snapshots_operations.py similarity index 83% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_snapshots_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_snapshots_operations.py index aaece305004..a09e86da1a9 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_snapshots_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_snapshots_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +5,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload +from typing import Any, Callable, IO, Optional, TypeVar, Union, overload import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -28,6 +28,7 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._snapshots_operations import ( build_create_or_update_request, build_delete_request, @@ -36,13 +37,11 @@ build_list_request, build_update_tags_request, ) +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class SnapshotsOperations: @@ -51,7 +50,7 @@ class SnapshotsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`snapshots` attribute. 
""" @@ -59,30 +58,28 @@ class SnapshotsOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list(self, **kwargs: Any) -> AsyncIterable["_models.Snapshot"]: + def list(self, **kwargs: Any) -> AsyncItemPaged["_models.Snapshot"]: """Gets a list of snapshots in the specified subscription. Gets a list of snapshots in the specified subscription. 
:return: An iterator like instance of either Snapshot or the result of cls(response) - :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.Snapshot] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -110,7 +107,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -136,14 +133,18 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data) @distributed_trace - def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.Snapshot"]: + def list_by_resource_group(self, resource_group_name: str, 
**kwargs: Any) -> AsyncItemPaged["_models.Snapshot"]: """Lists snapshots in the specified subscription and resource group. Lists snapshots in the specified subscription and resource group. @@ -152,17 +153,16 @@ def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Asy Required. :type resource_group_name: str :return: An iterator like instance of either Snapshot or the result of cls(response) - :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.Snapshot] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -191,7 +191,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -217,7 +217,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -235,10 +239,10 @@ async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -249,7 +253,7 @@ async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) _request = build_get_request( @@ -271,7 +275,11 @@ async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("Snapshot", pipeline_response.http_response) @@ -300,12 +308,12 @@ async def create_or_update( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: The snapshot to create or update. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :type parameters: ~azure.mgmt.containerservice.models.Snapshot :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ @@ -334,7 +342,7 @@ async def create_or_update( Default value is "application/json". :paramtype content_type: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ @@ -357,12 +365,12 @@ async def create_or_update( :type resource_name: str :param parameters: The snapshot to create or update. Is either a Snapshot type or a IO[bytes] type. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.Snapshot or IO[bytes] :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -373,7 +381,7 @@ async def create_or_update( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) @@ -407,7 +415,11 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("Snapshot", pipeline_response.http_response) @@ -436,12 +448,12 @@ async def update_tags( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: Parameters supplied to the Update snapshot Tags operation. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject + :type parameters: ~azure.mgmt.containerservice.models.TagsObject :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ @@ -470,7 +482,7 @@ async def update_tags( Default value is "application/json". :paramtype content_type: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ @@ -493,12 +505,12 @@ async def update_tags( :type resource_name: str :param parameters: Parameters supplied to the Update snapshot Tags operation. Is either a TagsObject type or a IO[bytes] type. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes] :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -509,7 +521,7 @@ async def update_tags( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) @@ -543,7 +555,11 @@ async def update_tags( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("Snapshot", pipeline_response.http_response) @@ -553,9 +569,7 @@ async def update_tags( return deserialized # type: ignore @distributed_trace_async - async def delete( # pylint: disable=inconsistent-return-statements - self, resource_group_name: str, resource_name: str, **kwargs: Any - ) -> None: + async def delete(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> None: """Deletes a snapshot. 
Deletes a snapshot. @@ -569,7 +583,7 @@ async def delete( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -580,7 +594,7 @@ async def delete( # pylint: disable=inconsistent-return-statements _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) _request = build_delete_request( @@ -602,7 +616,11 @@ async def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_role_bindings_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_trusted_access_role_bindings_operations.py similarity index 89% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_role_bindings_operations.py rename to 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_trusted_access_role_bindings_operations.py index 168cdaa7f99..07c07d19d19 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_role_bindings_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_trusted_access_role_bindings_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +5,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, Type, TypeVar, Union, cast, overload +from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -32,19 +32,18 @@ from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... 
import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._trusted_access_role_bindings_operations import ( build_create_or_update_request, build_delete_request, build_get_request, build_list_request, ) +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class TrustedAccessRoleBindingsOperations: @@ -53,7 +52,7 @@ class TrustedAccessRoleBindingsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`trusted_access_role_bindings` attribute. 
""" @@ -61,16 +60,15 @@ class TrustedAccessRoleBindingsOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list( self, resource_group_name: str, resource_name: str, **kwargs: Any - ) -> AsyncIterable["_models.TrustedAccessRoleBinding"]: + ) -> AsyncItemPaged["_models.TrustedAccessRoleBinding"]: """List trusted access role bindings. List trusted access role bindings. 
@@ -83,16 +81,16 @@ def list( :return: An iterator like instance of either TrustedAccessRoleBinding or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.TrustedAccessRoleBindingListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -122,7 +120,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -148,7 +146,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -170,10 +172,10 @@ async def get( :param trusted_access_role_binding_name: The name of trusted access role binding. Required. 
:type trusted_access_role_binding_name: str :return: TrustedAccessRoleBinding or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding + :rtype: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -184,7 +186,7 @@ async def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None) _request = build_get_request( @@ -207,7 +209,11 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response) @@ -224,7 +230,7 @@ async def _create_or_update_initial( trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, IO[bytes]], **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -235,7 +241,7 @@ async def _create_or_update_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) @@ -275,7 +281,10 @@ async def _create_or_update_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) @@ -308,15 +317,14 @@ async def begin_create_or_update( :param trusted_access_role_binding_name: The name of trusted access role binding. Required. :type trusted_access_role_binding_name: str :param trusted_access_role_binding: A trusted access role binding. Required. - :type trusted_access_role_binding: - ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding + :type trusted_access_role_binding: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of AsyncLROPoller that returns either TrustedAccessRoleBinding or the result of cls(response) :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -350,7 +358,7 @@ async def begin_create_or_update( :return: An instance of AsyncLROPoller that returns either TrustedAccessRoleBinding or the result of cls(response) :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -376,18 +384,18 @@ async def begin_create_or_update( :type trusted_access_role_binding_name: str :param trusted_access_role_binding: A trusted access role binding. Is either a TrustedAccessRoleBinding type or a IO[bytes] type. Required. 
- :type trusted_access_role_binding: - ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding or IO[bytes] + :type trusted_access_role_binding: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding + or IO[bytes] :return: An instance of AsyncLROPoller that returns either TrustedAccessRoleBinding or the result of cls(response) :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) @@ -435,7 +443,7 @@ def get_long_running_output(pipeline_response): async def _delete_initial( self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any ) -> AsyncIterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -446,7 +454,7 @@ async def _delete_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: 
ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) _request = build_delete_request( @@ -474,7 +482,10 @@ async def _delete_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} @@ -510,7 +521,7 @@ async def begin_delete( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_roles_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_trusted_access_roles_operations.py similarity index 77% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_roles_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_trusted_access_roles_operations.py index c00f5e79d57..cc473f5fdc0 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_trusted_access_roles_operations.py +++ 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/aio/operations/_trusted_access_roles_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,10 +5,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -import sys -from typing import Any, AsyncIterable, Callable, Dict, Optional, Type, TypeVar +from collections.abc import MutableMapping +from typing import Any, Callable, Optional, TypeVar import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -26,14 +26,13 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models +from ..._utils.serialization import Deserializer, Serializer from ...operations._trusted_access_roles_operations import build_list_request +from .._configuration import ContainerServiceClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +List = list class TrustedAccessRolesOperations: @@ -42,7 +41,7 @@ class TrustedAccessRolesOperations: **DO NOT** instantiate this class directly. 
Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.aio.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s :attr:`trusted_access_roles` attribute. """ @@ -50,14 +49,13 @@ class TrustedAccessRolesOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.TrustedAccessRole"]: + def list(self, location: str, **kwargs: Any) -> AsyncItemPaged["_models.TrustedAccessRole"]: """List supported trusted access roles. List supported trusted access roles. 
@@ -66,16 +64,16 @@ def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.TrustedAc :type location: str :return: An iterator like instance of either TrustedAccessRole or the result of cls(response) :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRole] + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.TrustedAccessRole] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -104,7 +102,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -130,7 +128,11 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response diff --git 
a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models.py deleted file mode 100644 index f3658f614ee..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models.py +++ /dev/null @@ -1,11 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from .v2019_04_01.models import * -from .v2019_04_30.models import * -from .v2022_09_02_preview.models import * -from .v2024_06_02_preview.models import * -from .v2024_07_01.models import * diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/__init__.py new file mode 100644 index 00000000000..1a65b56f013 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/__init__.py @@ -0,0 +1,506 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models_py3 import ( # type: ignore + AbsoluteMonthlySchedule, + AdvancedNetworking, + AdvancedNetworkingObservability, + AdvancedNetworkingSecurity, + AgentPool, + AgentPoolAvailableVersions, + AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem, + AgentPoolDeleteMachinesParameter, + AgentPoolGatewayProfile, + AgentPoolListResult, + AgentPoolNetworkProfile, + AgentPoolSecurityProfile, + AgentPoolStatus, + AgentPoolUpgradeProfile, + AgentPoolUpgradeProfilePropertiesUpgradesItem, + AgentPoolUpgradeSettings, + AgentPoolWindowsProfile, + AzureKeyVaultKms, + ClusterUpgradeSettings, + CompatibleVersions, + ContainerServiceLinuxProfile, + ContainerServiceNetworkProfile, + ContainerServiceSshConfiguration, + ContainerServiceSshPublicKey, + CreationData, + CredentialResult, + CredentialResults, + DailySchedule, + DateSpan, + DelegatedResource, + EndpointDependency, + EndpointDetail, + ErrorAdditionalInfo, + ErrorDetail, + ErrorResponse, + ExtendedLocation, + GPUProfile, + IPTag, + IstioCertificateAuthority, + IstioComponents, + IstioEgressGateway, + IstioIngressGateway, + IstioPluginCertificateAuthority, + IstioServiceMesh, + KubeletConfig, + KubernetesPatchVersion, + KubernetesVersion, + KubernetesVersionCapabilities, + KubernetesVersionListResult, + LinuxOSConfig, + LocalDNSOverride, + LocalDNSProfile, + Machine, + MachineIpAddress, + MachineListResult, + MachineNetworkProperties, + MachineProperties, + MaintenanceConfiguration, + MaintenanceConfigurationListResult, + MaintenanceWindow, + ManagedCluster, + ManagedClusterAADProfile, + ManagedClusterAIToolchainOperatorProfile, + ManagedClusterAPIServerAccessProfile, + ManagedClusterAccessProfile, + ManagedClusterAddonProfile, + ManagedClusterAddonProfileIdentity, + 
ManagedClusterAgentPoolProfile, + ManagedClusterAgentPoolProfileProperties, + ManagedClusterAutoUpgradeProfile, + ManagedClusterAzureMonitorProfile, + ManagedClusterAzureMonitorProfileKubeStateMetrics, + ManagedClusterAzureMonitorProfileMetrics, + ManagedClusterBootstrapProfile, + ManagedClusterCostAnalysis, + ManagedClusterHTTPProxyConfig, + ManagedClusterIdentity, + ManagedClusterIngressProfile, + ManagedClusterIngressProfileNginx, + ManagedClusterIngressProfileWebAppRouting, + ManagedClusterListResult, + ManagedClusterLoadBalancerProfile, + ManagedClusterLoadBalancerProfileManagedOutboundIPs, + ManagedClusterLoadBalancerProfileOutboundIPPrefixes, + ManagedClusterLoadBalancerProfileOutboundIPs, + ManagedClusterManagedOutboundIPProfile, + ManagedClusterMetricsProfile, + ManagedClusterNATGatewayProfile, + ManagedClusterNodeProvisioningProfile, + ManagedClusterNodeResourceGroupProfile, + ManagedClusterOIDCIssuerProfile, + ManagedClusterPodIdentity, + ManagedClusterPodIdentityException, + ManagedClusterPodIdentityProfile, + ManagedClusterPodIdentityProvisioningError, + ManagedClusterPodIdentityProvisioningErrorBody, + ManagedClusterPodIdentityProvisioningInfo, + ManagedClusterPoolUpgradeProfile, + ManagedClusterPoolUpgradeProfileUpgradesItem, + ManagedClusterPropertiesAutoScalerProfile, + ManagedClusterSKU, + ManagedClusterSecurityProfile, + ManagedClusterSecurityProfileDefender, + ManagedClusterSecurityProfileDefenderSecurityMonitoring, + ManagedClusterSecurityProfileImageCleaner, + ManagedClusterSecurityProfileWorkloadIdentity, + ManagedClusterServicePrincipalProfile, + ManagedClusterStaticEgressGatewayProfile, + ManagedClusterStatus, + ManagedClusterStorageProfile, + ManagedClusterStorageProfileBlobCSIDriver, + ManagedClusterStorageProfileDiskCSIDriver, + ManagedClusterStorageProfileFileCSIDriver, + ManagedClusterStorageProfileSnapshotController, + ManagedClusterUpgradeProfile, + ManagedClusterWindowsProfile, + ManagedClusterWorkloadAutoScalerProfile, + 
ManagedClusterWorkloadAutoScalerProfileKeda, + ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler, + ManagedNamespace, + ManagedNamespaceListResult, + ManagedServiceIdentityUserAssignedIdentitiesValue, + ManualScaleProfile, + MeshRevision, + MeshRevisionProfile, + MeshRevisionProfileList, + MeshRevisionProfileProperties, + MeshUpgradeProfile, + MeshUpgradeProfileList, + MeshUpgradeProfileProperties, + NamespaceProperties, + NetworkPolicies, + OperationListResult, + OperationValue, + OutboundEnvironmentEndpoint, + OutboundEnvironmentEndpointCollection, + PortRange, + PowerState, + PrivateEndpoint, + PrivateEndpointConnection, + PrivateEndpointConnectionListResult, + PrivateLinkResource, + PrivateLinkResourcesListResult, + PrivateLinkServiceConnectionState, + ProxyResource, + RelativeMonthlySchedule, + Resource, + ResourceQuota, + ResourceReference, + RunCommandRequest, + RunCommandResult, + ScaleProfile, + Schedule, + ServiceMeshProfile, + Snapshot, + SnapshotListResult, + SubResource, + SysctlConfig, + SystemData, + TagsObject, + TimeInWeek, + TimeSpan, + TrackedResource, + TrustedAccessRole, + TrustedAccessRoleBinding, + TrustedAccessRoleBindingListResult, + TrustedAccessRoleListResult, + TrustedAccessRoleRule, + UpgradeOverrideSettings, + UserAssignedIdentity, + VirtualMachineNodes, + VirtualMachinesProfile, + WeeklySchedule, + WindowsGmsaProfile, +) + +from ._container_service_client_enums import ( # type: ignore + AdoptionPolicy, + AdvancedNetworkPolicies, + AgentPoolMode, + AgentPoolSSHAccess, + AgentPoolType, + ArtifactSource, + BackendPoolType, + Code, + ConnectionStatus, + CreatedByType, + DeletePolicy, + Expander, + ExtendedLocationTypes, + Format, + GPUDriver, + GPUInstanceProfile, + IpFamily, + IstioIngressGatewayMode, + KeyVaultNetworkAccessTypes, + KubeletDiskType, + KubernetesSupportPlan, + LicenseType, + LoadBalancerSku, + LocalDNSForwardDestination, + LocalDNSForwardPolicy, + LocalDNSMode, + LocalDNSProtocol, + LocalDNSQueryLogging, + 
LocalDNSServeStale, + LocalDNSState, + ManagedClusterPodIdentityProvisioningState, + ManagedClusterSKUName, + ManagedClusterSKUTier, + NamespaceProvisioningState, + NetworkDataplane, + NetworkMode, + NetworkPlugin, + NetworkPluginMode, + NetworkPolicy, + NginxIngressControllerType, + NodeOSUpgradeChannel, + NodeProvisioningDefaultNodePools, + NodeProvisioningMode, + OSDiskType, + OSSKU, + OSType, + OutboundType, + PodIPAllocationMode, + PolicyRule, + PrivateEndpointConnectionProvisioningState, + Protocol, + PublicNetworkAccess, + ResourceIdentityType, + RestrictionLevel, + ScaleDownMode, + ScaleSetEvictionPolicy, + ScaleSetPriority, + ServiceMeshMode, + SnapshotType, + TrustedAccessRoleBindingProvisioningState, + Type, + UndrainableNodeBehavior, + UpgradeChannel, + WeekDay, + WorkloadRuntime, +) +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AbsoluteMonthlySchedule", + "AdvancedNetworking", + "AdvancedNetworkingObservability", + "AdvancedNetworkingSecurity", + "AgentPool", + "AgentPoolAvailableVersions", + "AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem", + "AgentPoolDeleteMachinesParameter", + "AgentPoolGatewayProfile", + "AgentPoolListResult", + "AgentPoolNetworkProfile", + "AgentPoolSecurityProfile", + "AgentPoolStatus", + "AgentPoolUpgradeProfile", + "AgentPoolUpgradeProfilePropertiesUpgradesItem", + "AgentPoolUpgradeSettings", + "AgentPoolWindowsProfile", + "AzureKeyVaultKms", + "ClusterUpgradeSettings", + "CompatibleVersions", + "ContainerServiceLinuxProfile", + "ContainerServiceNetworkProfile", + "ContainerServiceSshConfiguration", + "ContainerServiceSshPublicKey", + "CreationData", + "CredentialResult", + "CredentialResults", + "DailySchedule", + "DateSpan", + "DelegatedResource", + "EndpointDependency", + "EndpointDetail", + "ErrorAdditionalInfo", + "ErrorDetail", + "ErrorResponse", + "ExtendedLocation", + "GPUProfile", + "IPTag", + "IstioCertificateAuthority", + 
"IstioComponents", + "IstioEgressGateway", + "IstioIngressGateway", + "IstioPluginCertificateAuthority", + "IstioServiceMesh", + "KubeletConfig", + "KubernetesPatchVersion", + "KubernetesVersion", + "KubernetesVersionCapabilities", + "KubernetesVersionListResult", + "LinuxOSConfig", + "LocalDNSOverride", + "LocalDNSProfile", + "Machine", + "MachineIpAddress", + "MachineListResult", + "MachineNetworkProperties", + "MachineProperties", + "MaintenanceConfiguration", + "MaintenanceConfigurationListResult", + "MaintenanceWindow", + "ManagedCluster", + "ManagedClusterAADProfile", + "ManagedClusterAIToolchainOperatorProfile", + "ManagedClusterAPIServerAccessProfile", + "ManagedClusterAccessProfile", + "ManagedClusterAddonProfile", + "ManagedClusterAddonProfileIdentity", + "ManagedClusterAgentPoolProfile", + "ManagedClusterAgentPoolProfileProperties", + "ManagedClusterAutoUpgradeProfile", + "ManagedClusterAzureMonitorProfile", + "ManagedClusterAzureMonitorProfileKubeStateMetrics", + "ManagedClusterAzureMonitorProfileMetrics", + "ManagedClusterBootstrapProfile", + "ManagedClusterCostAnalysis", + "ManagedClusterHTTPProxyConfig", + "ManagedClusterIdentity", + "ManagedClusterIngressProfile", + "ManagedClusterIngressProfileNginx", + "ManagedClusterIngressProfileWebAppRouting", + "ManagedClusterListResult", + "ManagedClusterLoadBalancerProfile", + "ManagedClusterLoadBalancerProfileManagedOutboundIPs", + "ManagedClusterLoadBalancerProfileOutboundIPPrefixes", + "ManagedClusterLoadBalancerProfileOutboundIPs", + "ManagedClusterManagedOutboundIPProfile", + "ManagedClusterMetricsProfile", + "ManagedClusterNATGatewayProfile", + "ManagedClusterNodeProvisioningProfile", + "ManagedClusterNodeResourceGroupProfile", + "ManagedClusterOIDCIssuerProfile", + "ManagedClusterPodIdentity", + "ManagedClusterPodIdentityException", + "ManagedClusterPodIdentityProfile", + "ManagedClusterPodIdentityProvisioningError", + "ManagedClusterPodIdentityProvisioningErrorBody", + 
"ManagedClusterPodIdentityProvisioningInfo", + "ManagedClusterPoolUpgradeProfile", + "ManagedClusterPoolUpgradeProfileUpgradesItem", + "ManagedClusterPropertiesAutoScalerProfile", + "ManagedClusterSKU", + "ManagedClusterSecurityProfile", + "ManagedClusterSecurityProfileDefender", + "ManagedClusterSecurityProfileDefenderSecurityMonitoring", + "ManagedClusterSecurityProfileImageCleaner", + "ManagedClusterSecurityProfileWorkloadIdentity", + "ManagedClusterServicePrincipalProfile", + "ManagedClusterStaticEgressGatewayProfile", + "ManagedClusterStatus", + "ManagedClusterStorageProfile", + "ManagedClusterStorageProfileBlobCSIDriver", + "ManagedClusterStorageProfileDiskCSIDriver", + "ManagedClusterStorageProfileFileCSIDriver", + "ManagedClusterStorageProfileSnapshotController", + "ManagedClusterUpgradeProfile", + "ManagedClusterWindowsProfile", + "ManagedClusterWorkloadAutoScalerProfile", + "ManagedClusterWorkloadAutoScalerProfileKeda", + "ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler", + "ManagedNamespace", + "ManagedNamespaceListResult", + "ManagedServiceIdentityUserAssignedIdentitiesValue", + "ManualScaleProfile", + "MeshRevision", + "MeshRevisionProfile", + "MeshRevisionProfileList", + "MeshRevisionProfileProperties", + "MeshUpgradeProfile", + "MeshUpgradeProfileList", + "MeshUpgradeProfileProperties", + "NamespaceProperties", + "NetworkPolicies", + "OperationListResult", + "OperationValue", + "OutboundEnvironmentEndpoint", + "OutboundEnvironmentEndpointCollection", + "PortRange", + "PowerState", + "PrivateEndpoint", + "PrivateEndpointConnection", + "PrivateEndpointConnectionListResult", + "PrivateLinkResource", + "PrivateLinkResourcesListResult", + "PrivateLinkServiceConnectionState", + "ProxyResource", + "RelativeMonthlySchedule", + "Resource", + "ResourceQuota", + "ResourceReference", + "RunCommandRequest", + "RunCommandResult", + "ScaleProfile", + "Schedule", + "ServiceMeshProfile", + "Snapshot", + "SnapshotListResult", + "SubResource", + 
"SysctlConfig", + "SystemData", + "TagsObject", + "TimeInWeek", + "TimeSpan", + "TrackedResource", + "TrustedAccessRole", + "TrustedAccessRoleBinding", + "TrustedAccessRoleBindingListResult", + "TrustedAccessRoleListResult", + "TrustedAccessRoleRule", + "UpgradeOverrideSettings", + "UserAssignedIdentity", + "VirtualMachineNodes", + "VirtualMachinesProfile", + "WeeklySchedule", + "WindowsGmsaProfile", + "AdoptionPolicy", + "AdvancedNetworkPolicies", + "AgentPoolMode", + "AgentPoolSSHAccess", + "AgentPoolType", + "ArtifactSource", + "BackendPoolType", + "Code", + "ConnectionStatus", + "CreatedByType", + "DeletePolicy", + "Expander", + "ExtendedLocationTypes", + "Format", + "GPUDriver", + "GPUInstanceProfile", + "IpFamily", + "IstioIngressGatewayMode", + "KeyVaultNetworkAccessTypes", + "KubeletDiskType", + "KubernetesSupportPlan", + "LicenseType", + "LoadBalancerSku", + "LocalDNSForwardDestination", + "LocalDNSForwardPolicy", + "LocalDNSMode", + "LocalDNSProtocol", + "LocalDNSQueryLogging", + "LocalDNSServeStale", + "LocalDNSState", + "ManagedClusterPodIdentityProvisioningState", + "ManagedClusterSKUName", + "ManagedClusterSKUTier", + "NamespaceProvisioningState", + "NetworkDataplane", + "NetworkMode", + "NetworkPlugin", + "NetworkPluginMode", + "NetworkPolicy", + "NginxIngressControllerType", + "NodeOSUpgradeChannel", + "NodeProvisioningDefaultNodePools", + "NodeProvisioningMode", + "OSDiskType", + "OSSKU", + "OSType", + "OutboundType", + "PodIPAllocationMode", + "PolicyRule", + "PrivateEndpointConnectionProvisioningState", + "Protocol", + "PublicNetworkAccess", + "ResourceIdentityType", + "RestrictionLevel", + "ScaleDownMode", + "ScaleSetEvictionPolicy", + "ScaleSetPriority", + "ServiceMeshMode", + "SnapshotType", + "TrustedAccessRoleBindingProvisioningState", + "Type", + "UndrainableNodeBehavior", + "UpgradeChannel", + "WeekDay", + "WorkloadRuntime", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git 
a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_container_service_client_enums.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/_container_service_client_enums.py similarity index 57% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_container_service_client_enums.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/_container_service_client_enums.py index 80f31aa50fb..8218304f1ba 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_container_service_client_enums.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/_container_service_client_enums.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -10,9 +11,38 @@ from azure.core import CaseInsensitiveEnumMeta +class AdoptionPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Action if Kubernetes namespace with same name already exists.""" + + NEVER = "Never" + """If the namespace already exists in Kubernetes, attempts to create that same namespace in ARM + will fail.""" + IF_IDENTICAL = "IfIdentical" + """Take over the existing namespace to be managed by ARM, if there is no difference.""" + ALWAYS = "Always" + """Always take over the existing namespace to be managed by ARM, some fields might be overwritten.""" + + +class AdvancedNetworkPolicies(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Enable advanced network policies. This allows users to configure Layer 7 network policies + (FQDN, HTTP, Kafka). 
Policies themselves must be configured via the Cilium Network Policy + resources, see https://docs.cilium.io/en/latest/security/policy/index.html. This can be enabled + only on cilium-based clusters. If not specified, the default value is FQDN if security.enabled + is set to true. + """ + + L7 = "L7" + """Enable Layer7 network policies (FQDN, HTTP/S, Kafka). This option is a superset of the FQDN + option.""" + FQDN = "FQDN" + """Enable FQDN based network policies""" + NONE = "None" + """Disable Layer 7 network policies (FQDN, HTTP/S, Kafka)""" + + class AgentPoolMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """A cluster must have at least one 'System' Agent Pool at all times. For additional information - on agent pool restrictions and best practices, see: + """The mode of an agent pool. A cluster must have at least one 'System' Agent Pool at all times. + For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools. """ @@ -22,6 +52,18 @@ class AgentPoolMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): least 2vCPUs and 4GB of memory.""" USER = "User" """User agent pools are primarily for hosting your application pods.""" + GATEWAY = "Gateway" + """Gateway agent pools are dedicated to providing static egress IPs to pods. 
For more details, see + https://aka.ms/aks/static-egress-gateway.""" + + +class AgentPoolSSHAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """SSH access method of an agent pool.""" + + LOCAL_USER = "LocalUser" + """Can SSH onto the node as a local user using private key.""" + DISABLED = "Disabled" + """SSH service will be turned off on the node.""" class AgentPoolType(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -31,6 +73,17 @@ class AgentPoolType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Create an Agent Pool backed by a Virtual Machine Scale Set.""" AVAILABILITY_SET = "AvailabilitySet" """Use of this is strongly discouraged.""" + VIRTUAL_MACHINES = "VirtualMachines" + """Create an Agent Pool backed by a Single Instance VM orchestration mode.""" + + +class ArtifactSource(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The artifact source. The source where the artifacts are downloaded from.""" + + CACHE = "Cache" + """pull images from Azure Container Registry with cache""" + DIRECT = "Direct" + """pull images from Microsoft Artifact Registry""" class BackendPoolType(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -71,8 +124,18 @@ class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta): KEY = "Key" +class DeletePolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Delete options of a namespace.""" + + KEEP = "Keep" + """Only delete the ARM resource, keep the Kubernetes namespace. Also delete the ManagedByARM + label.""" + DELETE = "Delete" + """Delete both the ARM resource and the Kubernetes namespace together.""" + + class Expander(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """If not specified, the default is 'random'. See `expanders + """The expander to use when scaling up. If not specified, the default is 'random'. See `expanders `_ for more information. """ @@ -113,6 +176,15 @@ class Format(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Return exec format kubeconfig. 
This format requires kubelogin binary in the path.""" +class GPUDriver(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Whether to install GPU drivers. When it's not specified, default is Install.""" + + INSTALL = "Install" + """Install driver.""" + NONE = "None" + """Skip driver install.""" + + class GPUInstanceProfile(str, Enum, metaclass=CaseInsensitiveEnumMeta): """GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.""" @@ -140,9 +212,10 @@ class IstioIngressGatewayMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): class KeyVaultNetworkAccessTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Network access of key vault. The possible values are ``Public`` and ``Private``. ``Public`` - means the key vault allows public access from all networks. ``Private`` means the key vault - disables public access and enables private link. The default value is ``Public``. + """Network access of the key vault. Network access of key vault. The possible values are + ``Public`` and ``Private``. ``Public`` means the key vault allows public access from all + networks. ``Private`` means the key vault disables public access and enables private link. The + default value is ``Public``. """ PUBLIC = "Public" @@ -183,9 +256,9 @@ class LicenseType(str, Enum, metaclass=CaseInsensitiveEnumMeta): class LoadBalancerSku(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The default is 'standard'. See `Azure Load Balancer SKUs - `_ for more information about the - differences between load balancer SKUs. + """The load balancer sku for the managed cluster. The default is 'standard'. See `Azure Load + Balancer SKUs `_ for more information + about the differences between load balancer SKUs. 
""" STANDARD = "standard" @@ -196,6 +269,89 @@ class LoadBalancerSku(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Use a basic Load Balancer with limited functionality.""" +class LocalDNSForwardDestination(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Destination server for DNS queries to be forwarded from localDNS.""" + + CLUSTER_CORE_DNS = "ClusterCoreDNS" + """Forward DNS queries from localDNS to cluster CoreDNS.""" + VNET_DNS = "VnetDNS" + """Forward DNS queries from localDNS to DNS server configured in the VNET. A VNET can have + multiple DNS servers configured.""" + + +class LocalDNSForwardPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Forward policy for selecting upstream DNS server. See `forward plugin + `_ for more information. + """ + + SEQUENTIAL = "Sequential" + """Implements sequential upstream DNS server selection. See `forward plugin + `_ for more information.""" + ROUND_ROBIN = "RoundRobin" + """Implements round robin upstream DNS server selection. See `forward plugin + `_ for more information.""" + RANDOM = "Random" + """Implements random upstream DNS server selection. 
See `forward plugin + `_ for more information.""" + + +class LocalDNSMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Mode of enablement for localDNS.""" + + PREFERRED = "Preferred" + """If the current orchestrator version supports this feature, prefer enabling localDNS.""" + REQUIRED = "Required" + """Enable localDNS.""" + DISABLED = "Disabled" + """Disable localDNS.""" + + +class LocalDNSProtocol(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Enforce TCP or prefer UDP protocol for connections from localDNS to upstream DNS server.""" + + PREFER_UDP = "PreferUDP" + """Prefer UDP protocol for connections from localDNS to upstream DNS server.""" + FORCE_TCP = "ForceTCP" + """Enforce TCP protocol for connections from localDNS to upstream DNS server.""" + + +class LocalDNSQueryLogging(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Log level for DNS queries in localDNS.""" + + ERROR = "Error" + """Enables error logging in localDNS. See `errors plugin `_ for + more information.""" + LOG = "Log" + """Enables query logging in localDNS. See `log plugin `_ for more + information.""" + + +class LocalDNSServeStale(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Policy for serving stale data. See `cache plugin `_ for more + information. + """ + + VERIFY = "Verify" + """Serve stale data with verification. First verify that an entry is still unavailable from the + source before sending the expired entry to the client. See `cache plugin + `_ for more information.""" + IMMEDIATE = "Immediate" + """Serve stale data immediately. Send the expired entry to the client before checking to see if + the entry is available from the source. 
See `cache plugin `_ + for more information.""" + DISABLE = "Disable" + """Disable serving stale data.""" + + +class LocalDNSState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """System-generated state of localDNS.""" + + ENABLED = "Enabled" + """localDNS is enabled.""" + DISABLED = "Disabled" + """localDNS is disabled.""" + + class ManagedClusterPodIdentityProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The current provisioning state of the pod identity.""" @@ -212,11 +368,15 @@ class ManagedClusterSKUName(str, Enum, metaclass=CaseInsensitiveEnumMeta): BASE = "Base" """Base option for the AKS control plane.""" + AUTOMATIC = "Automatic" + """Automatic clusters are optimized to run most production workloads with configuration that + follows AKS best practices and recommendations for cluster and workload setup, scalability, and + security. For more details about Automatic clusters see aka.ms/aks/automatic.""" class ManagedClusterSKUTier(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """If not specified, the default is 'Free'. See `AKS Pricing Tier - `_ for more details. + """The tier of a managed cluster SKU. If not specified, the default is 'Free'. See `AKS Pricing + Tier `_ for more details. 
""" PREMIUM = "Premium" @@ -233,6 +393,17 @@ class ManagedClusterSKUTier(str, Enum, metaclass=CaseInsensitiveEnumMeta): for production use cases.""" +class NamespaceProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The current provisioning state of the namespace.""" + + UPDATING = "Updating" + DELETING = "Deleting" + CREATING = "Creating" + SUCCEEDED = "Succeeded" + FAILED = "Failed" + CANCELED = "Canceled" + + class NetworkDataplane(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Network dataplane used in the Kubernetes cluster.""" @@ -244,7 +415,9 @@ class NetworkDataplane(str, Enum, metaclass=CaseInsensitiveEnumMeta): class NetworkMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """This cannot be specified if networkPlugin is anything other than 'azure'.""" + """The network mode Azure CNI is configured with. This cannot be specified if networkPlugin is + anything other than 'azure'. + """ TRANSPARENT = "transparent" """No bridge is created. Intra-VM Pod to Pod communication is through IP routes created by Azure @@ -297,8 +470,30 @@ class NetworkPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Use Cilium to enforce network policies. This requires networkDataplane to be 'cilium'.""" +class NginxIngressControllerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Ingress type for the default NginxIngressController custom resource.""" + + ANNOTATION_CONTROLLED = "AnnotationControlled" + """The default NginxIngressController will be created. Users can edit the default + NginxIngressController Custom Resource to configure load balancer annotations.""" + EXTERNAL = "External" + """The default NginxIngressController will be created and the operator will provision an external + loadbalancer with it. Any annotation to make the default loadbalancer internal will be + overwritten.""" + INTERNAL = "Internal" + """The default NginxIngressController will be created and the operator will provision an internal + loadbalancer with it. 
Any annotation to make the default loadbalancer external will be + overwritten.""" + NONE = "None" + """The default Ingress Controller will not be created. It will not be deleted by the system if it + exists. Users should delete the default NginxIngressController Custom Resource manually if + desired.""" + + class NodeOSUpgradeChannel(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Manner in which the OS on your nodes is updated. The default is NodeImage.""" + """Node OS Upgrade Channel. Manner in which the OS on your nodes is updated. The default is + NodeImage. + """ NONE = "None" """No attempt to update your machines OS will be made either by OS or by rolling VHDs. This means @@ -324,10 +519,37 @@ class NodeOSUpgradeChannel(str, Enum, metaclass=CaseInsensitiveEnumMeta): consumption.""" +class NodeProvisioningDefaultNodePools(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The set of default Karpenter NodePools (CRDs) configured for node provisioning. This field has + no effect unless mode is 'Auto'. Warning: Changing this from Auto to None on an existing + cluster will cause the default Karpenter NodePools to be deleted, which will drain and delete + the nodes associated with those pools. It is strongly recommended to not do this unless there + are idle nodes ready to take the pods evicted by that action. If not specified, the default is + Auto. For more information see aka.ms/aks/nap#node-pools. + """ + + NONE = "None" + """No Karpenter NodePools are provisioned automatically. Automatic scaling will not happen unless + the user creates one or more NodePool CRD instances.""" + AUTO = "Auto" + """A standard set of Karpenter NodePools are provisioned""" + + +class NodeProvisioningMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The node provisioning mode. 
If not specified, the default is Manual.""" + + MANUAL = "Manual" + """Nodes are provisioned manually by the user""" + AUTO = "Auto" + """Nodes are provisioned automatically by AKS using Karpenter (See aka.ms/aks/nap for more + details). Fixed size Node Pools can still be created, but autoscaling Node Pools cannot be. + (See aka.ms/aks/nap for more details).""" + + class OSDiskType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested - OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more - information see `Ephemeral OS + """The OS disk type to be used for machines in the agent pool. The default is 'Ephemeral' if the + VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults + to 'Managed'. May not be changed after creation. For more information see `Ephemeral OS `_. """ @@ -352,6 +574,11 @@ class OSSKU(str, Enum, metaclass=CaseInsensitiveEnumMeta): AZURE_LINUX = "AzureLinux" """Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.""" + AZURE_LINUX3 = "AzureLinux3" + """Use AzureLinux3 as the OS for node images. Azure Linux is a container-optimized Linux distro + built by Microsoft, visit https://aka.ms/azurelinux for more information. For limitations, + visit https://aka.ms/aks/node-images. For OS migration guidance, see + https://aka.ms/aks/upgrade-os-version.""" CBL_MARINER = "CBLMariner" """Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.""" WINDOWS2019 = "Windows2019" @@ -360,6 +587,14 @@ class OSSKU(str, Enum, metaclass=CaseInsensitiveEnumMeta): WINDOWS2022 = "Windows2022" """Use Windows2022 as the OS for node images. Unsupported for system node pools. 
Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.""" + UBUNTU2204 = "Ubuntu2204" + """Use Ubuntu2204 as the OS for node images, however, Ubuntu 22.04 may not be supported for all + nodepools. For limitations and supported kubernetes versions, see + https://aka.ms/aks/supported-ubuntu-versions""" + UBUNTU2404 = "Ubuntu2404" + """Use Ubuntu2404 as the OS for node images, however, Ubuntu 24.04 may not be supported for all + nodepools. For limitations and supported kubernetes versions, see + https://aka.ms/aks/supported-ubuntu-versions""" class OSType(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -372,8 +607,9 @@ class OSType(str, Enum, metaclass=CaseInsensitiveEnumMeta): class OutboundType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """This can only be set at cluster creation time and cannot be changed later. For more information - see `egress outbound type `_. + """The outbound (egress) routing method. This can only be set at cluster creation time and cannot + be changed later. For more information see `egress outbound type + `_. """ LOAD_BALANCER = "loadBalancer" @@ -390,6 +626,39 @@ class OutboundType(str, Enum, metaclass=CaseInsensitiveEnumMeta): USER_ASSIGNED_NAT_GATEWAY = "userAssignedNATGateway" """The user-assigned NAT gateway associated to the cluster subnet is used for egress. This is an advanced scenario and requires proper network configuration.""" + NONE = "none" + """The AKS cluster is not set with any outbound-type. All AKS nodes follows Azure VM default + outbound behavior. Please refer to + https://azure.microsoft.com/en-us/updates/default-outbound-access-for-vms-in-azure-will-be-retired-transition-to-a-new-method-of-internet-access/""" + + +class PodIPAllocationMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Pod IP Allocation Mode. The IP allocation mode for pods in the agent pool. Must be used with + podSubnetId. The default is 'DynamicIndividual'. 
+ """ + + DYNAMIC_INDIVIDUAL = "DynamicIndividual" + """Each node gets allocated with a non-contiguous list of IP addresses assignable to pods. This is + better for maximizing a small to medium subnet of size /16 or smaller. The Azure CNI cluster + with dynamic IP allocation defaults to this mode if the customer does not explicitly specify a + podIPAllocationMode""" + STATIC_BLOCK = "StaticBlock" + """Each node is statically allocated CIDR block(s) of size /28 = 16 IPs per block to satisfy the + maxPods per node. Number of CIDR blocks >= (maxPods / 16). The block, rather than a single IP, + counts against the Azure Vnet Private IP limit of 65K. Therefore block mode is suitable for + running larger workloads with more than the current limit of 65K pods in a cluster. This mode + is better suited to scale with larger subnets of /15 or bigger""" + + +class PolicyRule(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Enum representing different network policy rules.""" + + DENY_ALL = "DenyAll" + """Deny all network traffic.""" + ALLOW_ALL = "AllowAll" + """Allow all network traffic.""" + ALLOW_SAME_NAMESPACE = "AllowSameNamespace" + """Allow traffic within the same namespace.""" class PrivateEndpointConnectionProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -412,15 +681,15 @@ class Protocol(str, Enum, metaclass=CaseInsensitiveEnumMeta): class PublicNetworkAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Allow or deny public network access for AKS.""" + """PublicNetworkAccess of the managedCluster. Allow or deny public network access for AKS.""" ENABLED = "Enabled" DISABLED = "Disabled" class ResourceIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """For more information see `use managed identities in AKS - `_. + """The type of identity used for the managed cluster. For more information see `use managed + identities in AKS `_. 
""" SYSTEM_ASSIGNED = "SystemAssigned" @@ -435,6 +704,17 @@ class ResourceIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Do not use a managed identity for the Managed Cluster, service principal will be used instead.""" +class RestrictionLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The restriction level applied to the cluster's node resource group. If not specified, the + default is 'Unrestricted'. + """ + + UNRESTRICTED = "Unrestricted" + """All RBAC permissions are allowed on the managed node resource group""" + READ_ONLY = "ReadOnly" + """Only */read RBAC permissions allowed on the managed node resource group""" + + class ScaleDownMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Describes how VMs are added to or removed from Agent Pools. See `billing states `_. @@ -448,9 +728,9 @@ class ScaleDownMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): class ScaleSetEvictionPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The eviction policy specifies what to do with the VM when it is evicted. The default is Delete. - For more information about eviction see `spot VMs - `_. + """The Virtual Machine Scale Set eviction policy. The eviction policy specifies what to do with + the VM when it is evicted. The default is Delete. For more information about eviction see `spot + VMs `_. """ DELETE = "Delete" @@ -498,7 +778,7 @@ class TrustedAccessRoleBindingProvisioningState(str, Enum, metaclass=CaseInsensi class Type(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies on which week of the month the dayOfWeek applies.""" + """The week index. Specifies on which week of the month the dayOfWeek applies.""" FIRST = "First" """First week of the month.""" @@ -512,8 +792,30 @@ class Type(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Last week of the month.""" +class UndrainableNodeBehavior(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Defines the behavior for undrainable nodes during upgrade. 
The most common cause of undrainable + nodes is Pod Disruption Budgets (PDBs), but other issues, such as pod termination grace period + is exceeding the remaining per-node drain timeout or pod is still being in a running state, can + also cause undrainable nodes. + """ + + CORDON = "Cordon" + """AKS will cordon the blocked nodes and replace them with surge nodes during upgrade. The blocked + nodes will be cordoned and replaced by surge nodes. The blocked nodes will have label + 'kubernetes.azure.com/upgrade-status:Quarantined'. A surge node will be retained for each + blocked node. A best-effort attempt will be made to delete all other surge nodes. If there are + enough surge nodes to replace blocked nodes, then the upgrade operation and the managed cluster + will be in failed state. Otherwise, the upgrade operation and the managed cluster will be in + canceled state.""" + SCHEDULE = "Schedule" + """AKS will mark the blocked nodes schedulable, but the blocked nodes are not upgraded. A + best-effort attempt will be made to delete all surge nodes. The upgrade operation and the + managed cluster will be in failed state if there are any blocked nodes.""" + + class UpgradeChannel(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """For more information see `setting the AKS cluster auto-upgrade channel + """The upgrade channel for auto upgrade. The default is 'none'. For more information see `setting + the AKS cluster auto-upgrade channel `_. """ @@ -561,3 +863,7 @@ class WorkloadRuntime(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Nodes will use Kubelet to run standard OCI container workloads.""" WASM_WASI = "WasmWasi" """Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).""" + KATA_VM_ISOLATION = "KataVmIsolation" + """Nodes can use (Kata + Cloud Hypervisor + Hyper-V) to enable Nested VM-based pods. Due to the + use Hyper-V, AKS node OS itself is a nested VM (the root OS) of Hyper-V. 
Thus it can only be + used with VM series that support Nested Virtualization such as Dv3 series.""" diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_models_py3.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/_models_py3.py similarity index 61% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_models_py3.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/_models_py3.py index 3c3086ec9ba..9bcc668831d 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/_models_py3.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/_models_py3.py @@ -1,5 +1,5 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 -# pylint: disable=too-many-lines # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. @@ -7,14 +7,15 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping import datetime -from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union +from typing import Any, Optional, TYPE_CHECKING, Union -from ... import _serialization +from .._utils import serialization as _serialization if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from .. 
import models as _models +JSON = MutableMapping[str, Any] class AbsoluteMonthlySchedule(_serialization.Model): @@ -52,6 +53,122 @@ def __init__(self, *, interval_months: int, day_of_month: int, **kwargs: Any) -> self.day_of_month = day_of_month +class AdvancedNetworking(_serialization.Model): + """Advanced Networking profile for enabling observability and security feature suite on a cluster. + For more information see aka.ms/aksadvancednetworking. + + :ivar enabled: Indicates the enablement of Advanced Networking functionalities of observability + and security on AKS clusters. When this is set to true, all observability and security features + will be set to enabled unless explicitly disabled. If not specified, the default is false. + :vartype enabled: bool + :ivar observability: Observability profile to enable advanced network metrics and flow logs + with historical contexts. + :vartype observability: ~azure.mgmt.containerservice.models.AdvancedNetworkingObservability + :ivar security: Security profile to enable security features on cilium based cluster. + :vartype security: ~azure.mgmt.containerservice.models.AdvancedNetworkingSecurity + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + "observability": {"key": "observability", "type": "AdvancedNetworkingObservability"}, + "security": {"key": "security", "type": "AdvancedNetworkingSecurity"}, + } + + def __init__( + self, + *, + enabled: Optional[bool] = None, + observability: Optional["_models.AdvancedNetworkingObservability"] = None, + security: Optional["_models.AdvancedNetworkingSecurity"] = None, + **kwargs: Any + ) -> None: + """ + :keyword enabled: Indicates the enablement of Advanced Networking functionalities of + observability and security on AKS clusters. When this is set to true, all observability and + security features will be set to enabled unless explicitly disabled. If not specified, the + default is false. 
+ :paramtype enabled: bool + :keyword observability: Observability profile to enable advanced network metrics and flow logs + with historical contexts. + :paramtype observability: ~azure.mgmt.containerservice.models.AdvancedNetworkingObservability + :keyword security: Security profile to enable security features on cilium based cluster. + :paramtype security: ~azure.mgmt.containerservice.models.AdvancedNetworkingSecurity + """ + super().__init__(**kwargs) + self.enabled = enabled + self.observability = observability + self.security = security + + +class AdvancedNetworkingObservability(_serialization.Model): + """Observability profile to enable advanced network metrics and flow logs with historical + contexts. + + :ivar enabled: Indicates the enablement of Advanced Networking observability functionalities on + clusters. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Indicates the enablement of Advanced Networking observability functionalities + on clusters. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class AdvancedNetworkingSecurity(_serialization.Model): + """Security profile to enable security features on cilium based cluster. + + :ivar enabled: This feature allows user to configure network policy based on DNS (FQDN) names. + It can be enabled only on cilium based clusters. If not specified, the default is false. + :vartype enabled: bool + :ivar advanced_network_policies: Enable advanced network policies. This allows users to + configure Layer 7 network policies (FQDN, HTTP, Kafka). Policies themselves must be configured + via the Cilium Network Policy resources, see + https://docs.cilium.io/en/latest/security/policy/index.html. This can be enabled only on + cilium-based clusters. 
If not specified, the default value is FQDN if security.enabled is set + to true. Known values are: "L7", "FQDN", and "None". + :vartype advanced_network_policies: str or + ~azure.mgmt.containerservice.models.AdvancedNetworkPolicies + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + "advanced_network_policies": {"key": "advancedNetworkPolicies", "type": "str"}, + } + + def __init__( + self, + *, + enabled: Optional[bool] = None, + advanced_network_policies: Optional[Union[str, "_models.AdvancedNetworkPolicies"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword enabled: This feature allows user to configure network policy based on DNS (FQDN) + names. It can be enabled only on cilium based clusters. If not specified, the default is false. + :paramtype enabled: bool + :keyword advanced_network_policies: Enable advanced network policies. This allows users to + configure Layer 7 network policies (FQDN, HTTP, Kafka). Policies themselves must be configured + via the Cilium Network Policy resources, see + https://docs.cilium.io/en/latest/security/policy/index.html. This can be enabled only on + cilium-based clusters. If not specified, the default value is FQDN if security.enabled is set + to true. Known values are: "L7", "FQDN", and "None". + :paramtype advanced_network_policies: str or + ~azure.mgmt.containerservice.models.AdvancedNetworkPolicies + """ + super().__init__(**kwargs) + self.enabled = enabled + self.advanced_network_policies = advanced_network_policies + + class SubResource(_serialization.Model): """Reference to another subresource. 
@@ -81,12 +198,12 @@ class SubResource(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.id = None - self.name = None - self.type = None + self.id: Optional[str] = None + self.name: Optional[str] = None + self.type: Optional[str] = None -class AgentPool(SubResource): # pylint: disable=too-many-instance-attributes +class AgentPool(SubResource): """Agent Pool. Variables are only populated by the server, and will be ignored when sending a request. @@ -98,117 +215,136 @@ class AgentPool(SubResource): # pylint: disable=too-many-instance-attributes :vartype name: str :ivar type: Resource type. :vartype type: str + :ivar e_tag: Unique read-only string used to implement optimistic concurrency. The eTag value + will change when the resource is updated. Specify an if-match or if-none-match header with the + eTag value for a subsequent request to enable optimistic concurrency per the normal eTag + convention. + :vartype e_tag: str :ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1. :vartype count: int - :ivar vm_size: VM size availability varies by region. If a node contains insufficient compute - resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted - VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :ivar vm_size: The size of the agent pool VMs. VM size availability varies by region. If a node + contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. + For more details on restricted VM sizes, see: + https://docs.microsoft.com/azure/aks/quotas-skus-regions. :vartype vm_size: str :ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. 
If you specify 0, it will apply the default osDisk size according to the vmSize specified. :vartype os_disk_size_gb: int - :ivar os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk - larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed - after creation. For more information see `Ephemeral OS - `_. Known values are: - "Managed" and "Ephemeral". - :vartype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :ivar os_disk_type: The OS disk type to be used for machines in the agent pool. The default is + 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. + Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see + `Ephemeral OS `_. + Known values are: "Managed" and "Ephemeral". + :vartype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType :ivar kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". - :vartype kubelet_disk_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :vartype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType :ivar workload_runtime: Determines the type of workload a node can run. Known values are: - "OCIContainer" and "WasmWasi". - :vartype workload_runtime: str or - ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime - :ivar vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and used. - If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just - nodes. This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + "OCIContainer", "WasmWasi", and "KataVmIsolation". 
+ :vartype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime + :ivar message_of_the_day: Message of the day for Linux nodes, base64-encoded. A base64-encoded + string which will be written to /etc/motd after decoding. This allows customization of the + message of the day for Linux nodes. It must not be specified for Windows nodes. It must be a + static string (i.e., will be printed raw and not be executed as a script). + :vartype message_of_the_day: str + :ivar vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will join + on startup. If this is not specified, a VNET and subnet will be generated and used. If no + podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :vartype vnet_subnet_id: str - :ivar pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see - vnetSubnetID for more details). This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :ivar pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted, pod + IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of + the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :vartype pod_subnet_id: str + :ivar pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the + agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values + are: "DynamicIndividual" and "StaticBlock". 
+ :vartype pod_ip_allocation_mode: str or ~azure.mgmt.containerservice.models.PodIPAllocationMode :ivar max_pods: The maximum number of pods that can run on a node. :vartype max_pods: int :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= - 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", - "Windows2019", and "Windows2022". - :vartype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3", + "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404". + :vartype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU :ivar max_count: The maximum number of nodes for auto-scaling. :vartype max_count: int :ivar min_count: The minimum number of nodes for auto-scaling. :vartype min_count: int :ivar enable_auto_scaling: Whether to enable auto-scaler. :vartype enable_auto_scaling: bool - :ivar scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, it - defaults to Delete. Known values are: "Delete" and "Deallocate". - :vartype scale_down_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode - :ivar type_properties_type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets" - and "AvailabilitySet". - :vartype type_properties_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType - :ivar mode: A cluster must have at least one 'System' Agent Pool at all times. 
For additional - information on agent pool restrictions and best practices, see: - https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". - :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode - :ivar orchestrator_version: Both patch version (e.g. 1.20.13) and - (e.g. 1.20) are supported. When is specified, the latest supported - GA patch version is chosen automatically. Updating the cluster with the same once - it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch - version is available. As a best practice, you should upgrade all node pools in an AKS cluster - to the same Kubernetes version. The node pool version must have the same major version as the - control plane. The node pool minor version must be within two minor versions of the control - plane version. The node pool version cannot be greater than the control plane version. For more - information see `upgrading a node pool + :ivar scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also + effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values + are: "Delete" and "Deallocate". + :vartype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode + :ivar type_properties_type: The type of Agent Pool. Known values are: + "VirtualMachineScaleSets", "AvailabilitySet", and "VirtualMachines". + :vartype type_properties_type: str or ~azure.mgmt.containerservice.models.AgentPoolType + :ivar mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool at + all times. For additional information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and + "Gateway". + :vartype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode + :ivar orchestrator_version: The version of Kubernetes specified by the user. 
Both patch version + (e.g. 1.20.13) and (e.g. 1.20) are supported. When + is specified, the latest supported GA patch version is chosen automatically. + Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14) + will not trigger an upgrade, even if a newer patch version is available. As a best practice, + you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node + pool version must have the same major version as the control plane. The node pool minor version + must be within two minor versions of the control plane version. The node pool version cannot be + greater than the control plane version. For more information see `upgrading a node pool `_. :vartype orchestrator_version: str - :ivar current_orchestrator_version: If orchestratorVersion is a fully specified version - , this field will be exactly equal to it. If orchestratorVersion is - , this field will contain the full version being used. + :ivar current_orchestrator_version: The version of Kubernetes the Agent Pool is running. If + orchestratorVersion is a fully specified version , this field will be + exactly equal to it. If orchestratorVersion is , this field will contain the full + version being used. :vartype current_orchestrator_version: str :ivar node_image_version: The version of node image. :vartype node_image_version: str :ivar upgrade_settings: Settings for upgrading the agentpool. - :vartype upgrade_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings + :vartype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings :ivar provisioning_state: The current deployment or provisioning state. :vartype provisioning_state: str - :ivar power_state: When an Agent Pool is first created it is initially Running. The Agent Pool - can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and - does not accrue billing charges. 
An Agent Pool can only be stopped if it is Running and - provisioning state is Succeeded. - :vartype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :ivar power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first + created it is initially Running. The Agent Pool can be stopped by setting this field to + Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An + Agent Pool can only be stopped if it is Running and provisioning state is Succeeded. + :vartype power_state: ~azure.mgmt.containerservice.models.PowerState :ivar availability_zones: The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'. :vartype availability_zones: list[str] - :ivar enable_node_public_ip: Some scenarios may require nodes in a node pool to receive their - own dedicated public IP addresses. A common scenario is for gaming workloads, where a console - needs to make a direct connection to a cloud virtual machine to minimize hops. For more - information see `assigning a public IP per node + :ivar enable_node_public_ip: Whether each node is allocated its own public IP. Some scenarios + may require nodes in a node pool to receive their own dedicated public IP addresses. A common + scenario is for gaming workloads, where a console needs to make a direct connection to a cloud + virtual machine to minimize hops. For more information see `assigning a public IP per node `_. The default is false. :vartype enable_node_public_ip: bool - :ivar node_public_ip_prefix_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. # pylint: disable=line-too-long + :ivar node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from. 
+ This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. :vartype node_public_ip_prefix_id: str :ivar scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'. Known values are: "Spot" and "Regular". - :vartype scale_set_priority: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority - :ivar scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is - 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". + :vartype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority + :ivar scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This + cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is + 'Delete'. Known values are: "Delete" and "Deallocate". :vartype scale_set_eviction_policy: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy - :ivar spot_max_price: Possible values are any decimal value greater than zero or -1 which + ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy + :ivar spot_max_price: The max price (in US Dollars) you are willing to pay for spot instances. + Possible values are any decimal value greater than zero or -1 which indicates default price to + be up-to on-demand. Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see `spot VMs pricing `_. :vartype spot_max_price: float @@ -222,49 +358,64 @@ class AgentPool(SubResource): # pylint: disable=too-many-instance-attributes :ivar proximity_placement_group_id: The ID for Proximity Placement Group. :vartype proximity_placement_group_id: str :ivar kubelet_config: The Kubelet configuration on the agent pool nodes. 
- :vartype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :vartype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig :ivar linux_os_config: The OS configuration of Linux agent nodes. - :vartype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig - :ivar enable_encryption_at_host: This is only supported on certain VM sizes and in certain - Azure regions. For more information, see: - https://docs.microsoft.com/azure/aks/enable-host-encryption. + :vartype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig + :ivar enable_encryption_at_host: Whether to enable host based OS and data drive encryption. + This is only supported on certain VM sizes and in certain Azure regions. For more information, + see: https://docs.microsoft.com/azure/aks/enable-host-encryption. :vartype enable_encryption_at_host: bool :ivar enable_ultra_ssd: Whether to enable UltraSSD. :vartype enable_ultra_ssd: bool - :ivar enable_fips: See `Add a FIPS-enabled node pool + :ivar enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool `_ for more details. :vartype enable_fips: bool :ivar gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g". - :vartype gpu_instance_profile: str or - ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :vartype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile :ivar creation_data: CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot. - :vartype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :vartype creation_data: ~azure.mgmt.containerservice.models.CreationData :ivar capacity_reservation_group_id: AKS will associate the specified agent pool with the Capacity Reservation Group. 
:vartype capacity_reservation_group_id: str - :ivar host_group_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + :ivar host_group_id: The fully qualified resource ID of the Dedicated Host Group to provision + virtual machines from, used only in creation scenario and not allowed to changed once set. This + is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see `Azure dedicated hosts `_. :vartype host_group_id: str :ivar network_profile: Network-related settings of an agent pool. - :vartype network_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :vartype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile :ivar windows_profile: The Windows agent pool's specific profile. - :vartype windows_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :vartype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile :ivar security_profile: The security settings of an agent pool. - :vartype security_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile + :vartype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile + :ivar gpu_profile: GPU settings for the Agent Pool. + :vartype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile + :ivar gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field + cannot be set if agent pool mode is not Gateway. + :vartype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile + :ivar virtual_machines_profile: Specifications on VirtualMachines agent pool. 
+ :vartype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile + :ivar virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool. + :vartype virtual_machine_nodes_status: + list[~azure.mgmt.containerservice.models.VirtualMachineNodes] + :ivar status: Contains read-only information about the Agent Pool. + :vartype status: ~azure.mgmt.containerservice.models.AgentPoolStatus + :ivar local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS overrides. + LocalDNS helps improve performance and reliability of DNS resolution in an AKS cluster. For + more details see aka.ms/aks/localdns. + :vartype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, + "e_tag": {"readonly": True}, "os_disk_size_gb": {"maximum": 2048, "minimum": 0}, "current_orchestrator_version": {"readonly": True}, "node_image_version": {"readonly": True}, @@ -275,14 +426,17 @@ class AgentPool(SubResource): # pylint: disable=too-many-instance-attributes "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, + "e_tag": {"key": "properties.eTag", "type": "str"}, "count": {"key": "properties.count", "type": "int"}, "vm_size": {"key": "properties.vmSize", "type": "str"}, "os_disk_size_gb": {"key": "properties.osDiskSizeGB", "type": "int"}, "os_disk_type": {"key": "properties.osDiskType", "type": "str"}, "kubelet_disk_type": {"key": "properties.kubeletDiskType", "type": "str"}, "workload_runtime": {"key": "properties.workloadRuntime", "type": "str"}, + "message_of_the_day": {"key": "properties.messageOfTheDay", "type": "str"}, "vnet_subnet_id": {"key": "properties.vnetSubnetID", "type": "str"}, "pod_subnet_id": {"key": "properties.podSubnetID", "type": "str"}, + "pod_ip_allocation_mode": {"key": "properties.podIPAllocationMode", "type": "str"}, "max_pods": 
{"key": "properties.maxPods", "type": "int"}, "os_type": {"key": "properties.osType", "type": "str"}, "os_sku": {"key": "properties.osSKU", "type": "str"}, @@ -320,6 +474,15 @@ class AgentPool(SubResource): # pylint: disable=too-many-instance-attributes "network_profile": {"key": "properties.networkProfile", "type": "AgentPoolNetworkProfile"}, "windows_profile": {"key": "properties.windowsProfile", "type": "AgentPoolWindowsProfile"}, "security_profile": {"key": "properties.securityProfile", "type": "AgentPoolSecurityProfile"}, + "gpu_profile": {"key": "properties.gpuProfile", "type": "GPUProfile"}, + "gateway_profile": {"key": "properties.gatewayProfile", "type": "AgentPoolGatewayProfile"}, + "virtual_machines_profile": {"key": "properties.virtualMachinesProfile", "type": "VirtualMachinesProfile"}, + "virtual_machine_nodes_status": { + "key": "properties.virtualMachineNodesStatus", + "type": "[VirtualMachineNodes]", + }, + "status": {"key": "properties.status", "type": "AgentPoolStatus"}, + "local_dns_profile": {"key": "properties.localDNSProfile", "type": "LocalDNSProfile"}, } def __init__( # pylint: disable=too-many-locals @@ -331,8 +494,10 @@ def __init__( # pylint: disable=too-many-locals os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None, kubelet_disk_type: Optional[Union[str, "_models.KubeletDiskType"]] = None, workload_runtime: Optional[Union[str, "_models.WorkloadRuntime"]] = None, + message_of_the_day: Optional[str] = None, vnet_subnet_id: Optional[str] = None, pod_subnet_id: Optional[str] = None, + pod_ip_allocation_mode: Optional[Union[str, "_models.PodIPAllocationMode"]] = None, max_pods: Optional[int] = None, os_type: Union[str, "_models.OSType"] = "Linux", os_sku: Optional[Union[str, "_models.OSSKU"]] = None, @@ -345,15 +510,15 @@ def __init__( # pylint: disable=too-many-locals orchestrator_version: Optional[str] = None, upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None, power_state: Optional["_models.PowerState"] = 
None, - availability_zones: Optional[List[str]] = None, + availability_zones: Optional[list[str]] = None, enable_node_public_ip: Optional[bool] = None, node_public_ip_prefix_id: Optional[str] = None, scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular", scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete", spot_max_price: float = -1, - tags: Optional[Dict[str, str]] = None, - node_labels: Optional[Dict[str, str]] = None, - node_taints: Optional[List[str]] = None, + tags: Optional[dict[str, str]] = None, + node_labels: Optional[dict[str, str]] = None, + node_taints: Optional[list[str]] = None, proximity_placement_group_id: Optional[str] = None, kubelet_config: Optional["_models.KubeletConfig"] = None, linux_os_config: Optional["_models.LinuxOSConfig"] = None, @@ -367,6 +532,12 @@ def __init__( # pylint: disable=too-many-locals network_profile: Optional["_models.AgentPoolNetworkProfile"] = None, windows_profile: Optional["_models.AgentPoolWindowsProfile"] = None, security_profile: Optional["_models.AgentPoolSecurityProfile"] = None, + gpu_profile: Optional["_models.GPUProfile"] = None, + gateway_profile: Optional["_models.AgentPoolGatewayProfile"] = None, + virtual_machines_profile: Optional["_models.VirtualMachinesProfile"] = None, + virtual_machine_nodes_status: Optional[list["_models.VirtualMachineNodes"]] = None, + status: Optional["_models.AgentPoolStatus"] = None, + local_dns_profile: Optional["_models.LocalDNSProfile"] = None, **kwargs: Any ) -> None: """ @@ -374,108 +545,124 @@ def __init__( # pylint: disable=too-many-locals range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1. :paramtype count: int - :keyword vm_size: VM size availability varies by region. If a node contains insufficient - compute resources (memory, cpu, etc) pods might fail to run correctly. 
For more details on - restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :keyword vm_size: The size of the agent pool VMs. VM size availability varies by region. If a + node contains insufficient compute resources (memory, cpu, etc) pods might fail to run + correctly. For more details on restricted VM sizes, see: + https://docs.microsoft.com/azure/aks/quotas-skus-regions. :paramtype vm_size: str :keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. :paramtype os_disk_size_gb: int - :keyword os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk - larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed - after creation. For more information see `Ephemeral OS + :keyword os_disk_type: The OS disk type to be used for machines in the agent pool. The default + is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested + OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more + information see `Ephemeral OS `_. Known values are: "Managed" and "Ephemeral". - :paramtype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :paramtype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType :keyword kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". - :paramtype kubelet_disk_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :paramtype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType :keyword workload_runtime: Determines the type of workload a node can run. Known values are: - "OCIContainer" and "WasmWasi". 
- :paramtype workload_runtime: str or - ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime - :keyword vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and - used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to - just nodes. This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + "OCIContainer", "WasmWasi", and "KataVmIsolation". + :paramtype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime + :keyword message_of_the_day: Message of the day for Linux nodes, base64-encoded. A + base64-encoded string which will be written to /etc/motd after decoding. This allows + customization of the message of the day for Linux nodes. It must not be specified for Windows + nodes. It must be a static string (i.e., will be printed raw and not be executed as a script). + :paramtype message_of_the_day: str + :keyword vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will + join on startup. If this is not specified, a VNET and subnet will be generated and used. If no + podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :paramtype vnet_subnet_id: str - :keyword pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see - vnetSubnetID for more details). This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :keyword pod_subnet_id: The ID of the subnet which pods will join when launched. 
If omitted, + pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is + of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :paramtype pod_subnet_id: str + :keyword pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the + agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values + are: "DynamicIndividual" and "StaticBlock". + :paramtype pod_ip_allocation_mode: str or + ~azure.mgmt.containerservice.models.PodIPAllocationMode :keyword max_pods: The maximum number of pods that can run on a node. :paramtype max_pods: int :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType :keyword os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= - 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", - "Windows2019", and "Windows2022". - :paramtype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3", + "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404". + :paramtype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU :keyword max_count: The maximum number of nodes for auto-scaling. :paramtype max_count: int :keyword min_count: The minimum number of nodes for auto-scaling. :paramtype min_count: int :keyword enable_auto_scaling: Whether to enable auto-scaler. :paramtype enable_auto_scaling: bool - :keyword scale_down_mode: This also effects the cluster autoscaler behavior. 
If not specified, - it defaults to Delete. Known values are: "Delete" and "Deallocate". - :paramtype scale_down_mode: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode + :keyword scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also + effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values + are: "Delete" and "Deallocate". + :paramtype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode :keyword type_properties_type: The type of Agent Pool. Known values are: - "VirtualMachineScaleSets" and "AvailabilitySet". - :paramtype type_properties_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType - :keyword mode: A cluster must have at least one 'System' Agent Pool at all times. For - additional information on agent pool restrictions and best practices, see: - https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". - :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode - :keyword orchestrator_version: Both patch version (e.g. 1.20.13) and - (e.g. 1.20) are supported. When is specified, the latest supported - GA patch version is chosen automatically. Updating the cluster with the same once - it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch - version is available. As a best practice, you should upgrade all node pools in an AKS cluster - to the same Kubernetes version. The node pool version must have the same major version as the - control plane. The node pool minor version must be within two minor versions of the control - plane version. The node pool version cannot be greater than the control plane version. For more - information see `upgrading a node pool + "VirtualMachineScaleSets", "AvailabilitySet", and "VirtualMachines". 
+ :paramtype type_properties_type: str or ~azure.mgmt.containerservice.models.AgentPoolType + :keyword mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool + at all times. For additional information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and + "Gateway". + :paramtype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode + :keyword orchestrator_version: The version of Kubernetes specified by the user. Both patch + version (e.g. 1.20.13) and (e.g. 1.20) are supported. When + is specified, the latest supported GA patch version is chosen automatically. + Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14) + will not trigger an upgrade, even if a newer patch version is available. As a best practice, + you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node + pool version must have the same major version as the control plane. The node pool minor version + must be within two minor versions of the control plane version. The node pool version cannot be + greater than the control plane version. For more information see `upgrading a node pool `_. :paramtype orchestrator_version: str :keyword upgrade_settings: Settings for upgrading the agentpool. - :paramtype upgrade_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings - :keyword power_state: When an Agent Pool is first created it is initially Running. The Agent - Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs - and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and - provisioning state is Succeeded. 
- :paramtype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :paramtype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings + :keyword power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first + created it is initially Running. The Agent Pool can be stopped by setting this field to + Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An + Agent Pool can only be stopped if it is Running and provisioning state is Succeeded. + :paramtype power_state: ~azure.mgmt.containerservice.models.PowerState :keyword availability_zones: The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'. :paramtype availability_zones: list[str] - :keyword enable_node_public_ip: Some scenarios may require nodes in a node pool to receive - their own dedicated public IP addresses. A common scenario is for gaming workloads, where a - console needs to make a direct connection to a cloud virtual machine to minimize hops. For more - information see `assigning a public IP per node - `_. # pylint: disable=line-too-long + :keyword enable_node_public_ip: Whether each node is allocated its own public IP. Some + scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. + A common scenario is for gaming workloads, where a console needs to make a direct connection to + a cloud virtual machine to minimize hops. For more information see `assigning a public IP per + node + `_. The default is false. :paramtype enable_node_public_ip: bool - :keyword node_public_ip_prefix_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. # pylint: disable=line-too-long + :keyword node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from. 
+ This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. :paramtype node_public_ip_prefix_id: str :keyword scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'. Known values are: "Spot" and "Regular". - :paramtype scale_set_priority: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority - :keyword scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is - 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". + :paramtype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority + :keyword scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This + cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is + 'Delete'. Known values are: "Delete" and "Deallocate". :paramtype scale_set_eviction_policy: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy - :keyword spot_max_price: Possible values are any decimal value greater than zero or -1 which - indicates the willingness to pay any on-demand price. For more details on spot pricing, see - `spot VMs pricing `_. + ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy + :keyword spot_max_price: The max price (in US Dollars) you are willing to pay for spot + instances. Possible values are any decimal value greater than zero or -1 which indicates + default price to be up-to on-demand. Possible values are any decimal value greater than zero or + -1 which indicates the willingness to pay any on-demand price. For more details on spot + pricing, see `spot VMs pricing + `_. :paramtype spot_max_price: float :keyword tags: The tags to be persisted on the agent pool virtual machine scale set. 
:paramtype tags: dict[str, str] @@ -487,54 +674,71 @@ def __init__( # pylint: disable=too-many-locals :keyword proximity_placement_group_id: The ID for Proximity Placement Group. :paramtype proximity_placement_group_id: str :keyword kubelet_config: The Kubelet configuration on the agent pool nodes. - :paramtype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :paramtype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig :keyword linux_os_config: The OS configuration of Linux agent nodes. - :paramtype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig - :keyword enable_encryption_at_host: This is only supported on certain VM sizes and in certain - Azure regions. For more information, see: - https://docs.microsoft.com/azure/aks/enable-host-encryption. + :paramtype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig + :keyword enable_encryption_at_host: Whether to enable host based OS and data drive encryption. + This is only supported on certain VM sizes and in certain Azure regions. For more information, + see: https://docs.microsoft.com/azure/aks/enable-host-encryption. :paramtype enable_encryption_at_host: bool :keyword enable_ultra_ssd: Whether to enable UltraSSD. :paramtype enable_ultra_ssd: bool - :keyword enable_fips: See `Add a FIPS-enabled node pool + :keyword enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool `_ for more details. :paramtype enable_fips: bool :keyword gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g". 
- :paramtype gpu_instance_profile: str or - ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :paramtype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile :keyword creation_data: CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot. - :paramtype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :paramtype creation_data: ~azure.mgmt.containerservice.models.CreationData :keyword capacity_reservation_group_id: AKS will associate the specified agent pool with the Capacity Reservation Group. :paramtype capacity_reservation_group_id: str - :keyword host_group_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + :keyword host_group_id: The fully qualified resource ID of the Dedicated Host Group to + provision virtual machines from, used only in creation scenario and not allowed to changed once + set. This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see `Azure dedicated hosts `_. :paramtype host_group_id: str :keyword network_profile: Network-related settings of an agent pool. - :paramtype network_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :paramtype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile :keyword windows_profile: The Windows agent pool's specific profile. - :paramtype windows_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :paramtype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile :keyword security_profile: The security settings of an agent pool. 
- :paramtype security_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile - """ - super().__init__(**kwargs) + :paramtype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile + :keyword gpu_profile: GPU settings for the Agent Pool. + :paramtype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile + :keyword gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field + cannot be set if agent pool mode is not Gateway. + :paramtype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile + :keyword virtual_machines_profile: Specifications on VirtualMachines agent pool. + :paramtype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile + :keyword virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool. + :paramtype virtual_machine_nodes_status: + list[~azure.mgmt.containerservice.models.VirtualMachineNodes] + :keyword status: Contains read-only information about the Agent Pool. + :paramtype status: ~azure.mgmt.containerservice.models.AgentPoolStatus + :keyword local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS + overrides. LocalDNS helps improve performance and reliability of DNS resolution in an AKS + cluster. For more details see aka.ms/aks/localdns. 
+ :paramtype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile + """ + super().__init__(**kwargs) + self.e_tag: Optional[str] = None self.count = count self.vm_size = vm_size self.os_disk_size_gb = os_disk_size_gb self.os_disk_type = os_disk_type self.kubelet_disk_type = kubelet_disk_type self.workload_runtime = workload_runtime + self.message_of_the_day = message_of_the_day self.vnet_subnet_id = vnet_subnet_id self.pod_subnet_id = pod_subnet_id + self.pod_ip_allocation_mode = pod_ip_allocation_mode self.max_pods = max_pods self.os_type = os_type self.os_sku = os_sku @@ -545,10 +749,10 @@ def __init__( # pylint: disable=too-many-locals self.type_properties_type = type_properties_type self.mode = mode self.orchestrator_version = orchestrator_version - self.current_orchestrator_version = None - self.node_image_version = None + self.current_orchestrator_version: Optional[str] = None + self.node_image_version: Optional[str] = None self.upgrade_settings = upgrade_settings - self.provisioning_state = None + self.provisioning_state: Optional[str] = None self.power_state = power_state self.availability_zones = availability_zones self.enable_node_public_ip = enable_node_public_ip @@ -572,6 +776,12 @@ def __init__( # pylint: disable=too-many-locals self.network_profile = network_profile self.windows_profile = windows_profile self.security_profile = security_profile + self.gpu_profile = gpu_profile + self.gateway_profile = gateway_profile + self.virtual_machines_profile = virtual_machines_profile + self.virtual_machine_nodes_status = virtual_machine_nodes_status + self.status = status + self.local_dns_profile = local_dns_profile class AgentPoolAvailableVersions(_serialization.Model): @@ -587,7 +797,7 @@ class AgentPoolAvailableVersions(_serialization.Model): :vartype type: str :ivar agent_pool_versions: List of versions available for agent pool. 
:vartype agent_pool_versions: - list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem] + list[~azure.mgmt.containerservice.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem] """ _validation = { @@ -609,18 +819,18 @@ class AgentPoolAvailableVersions(_serialization.Model): def __init__( self, *, - agent_pool_versions: Optional[List["_models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem"]] = None, + agent_pool_versions: Optional[list["_models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem"]] = None, **kwargs: Any ) -> None: """ :keyword agent_pool_versions: List of versions available for agent pool. :paramtype agent_pool_versions: - list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem] + list[~azure.mgmt.containerservice.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem] """ super().__init__(**kwargs) - self.id = None - self.name = None - self.type = None + self.id: Optional[str] = None + self.name: Optional[str] = None + self.type: Optional[str] = None self.agent_pool_versions = agent_pool_versions @@ -680,7 +890,7 @@ class AgentPoolDeleteMachinesParameter(_serialization.Model): "machine_names": {"key": "machineNames", "type": "[str]"}, } - def __init__(self, *, machine_names: List[str], **kwargs: Any) -> None: + def __init__(self, *, machine_names: list[str], **kwargs: Any) -> None: """ :keyword machine_names: The agent pool machine names. Required. :paramtype machine_names: list[str] @@ -689,13 +899,47 @@ def __init__(self, *, machine_names: List[str], **kwargs: Any) -> None: self.machine_names = machine_names +class AgentPoolGatewayProfile(_serialization.Model): + """Profile of the managed cluster gateway agent pool. + + :ivar public_ip_prefix_size: The Gateway agent pool associates one public IPPrefix for each + static egress gateway to provide public egress. 
The size of Public IPPrefix should be selected + by the user. Each node in the agent pool is assigned with one IP from the IPPrefix. The + IPPrefix size thus serves as a cap on the size of the Gateway agent pool. Due to Azure public + IPPrefix size limitation, the valid value range is [28, 31] (/31 = 2 nodes/IPs, /30 = 4 + nodes/IPs, /29 = 8 nodes/IPs, /28 = 16 nodes/IPs). The default value is 31. + :vartype public_ip_prefix_size: int + """ + + _validation = { + "public_ip_prefix_size": {"maximum": 31, "minimum": 28}, + } + + _attribute_map = { + "public_ip_prefix_size": {"key": "publicIPPrefixSize", "type": "int"}, + } + + def __init__(self, *, public_ip_prefix_size: int = 31, **kwargs: Any) -> None: + """ + :keyword public_ip_prefix_size: The Gateway agent pool associates one public IPPrefix for each + static egress gateway to provide public egress. The size of Public IPPrefix should be selected + by the user. Each node in the agent pool is assigned with one IP from the IPPrefix. The + IPPrefix size thus serves as a cap on the size of the Gateway agent pool. Due to Azure public + IPPrefix size limitation, the valid value range is [28, 31] (/31 = 2 nodes/IPs, /30 = 4 + nodes/IPs, /29 = 8 nodes/IPs, /28 = 16 nodes/IPs). The default value is 31. + :paramtype public_ip_prefix_size: int + """ + super().__init__(**kwargs) + self.public_ip_prefix_size = public_ip_prefix_size + + class AgentPoolListResult(_serialization.Model): """The response from the List Agent Pools operation. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of agent pools. - :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :vartype value: list[~azure.mgmt.containerservice.models.AgentPool] :ivar next_link: The URL to get the next set of agent pool results. 
:vartype next_link: str """ @@ -709,24 +953,24 @@ class AgentPoolListResult(_serialization.Model): "next_link": {"key": "nextLink", "type": "str"}, } - def __init__(self, *, value: Optional[List["_models.AgentPool"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.AgentPool"]] = None, **kwargs: Any) -> None: """ :keyword value: The list of agent pools. - :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :paramtype value: list[~azure.mgmt.containerservice.models.AgentPool] """ super().__init__(**kwargs) self.value = value - self.next_link = None + self.next_link: Optional[str] = None class AgentPoolNetworkProfile(_serialization.Model): """Network settings of an agent pool. :ivar node_public_ip_tags: IPTags of instance-level public IPs. - :vartype node_public_ip_tags: list[~azure.mgmt.containerservice.v2024_07_01.models.IPTag] + :vartype node_public_ip_tags: list[~azure.mgmt.containerservice.models.IPTag] :ivar allowed_host_ports: The port ranges that are allowed to access. The specified ranges are allowed to overlap. - :vartype allowed_host_ports: list[~azure.mgmt.containerservice.v2024_07_01.models.PortRange] + :vartype allowed_host_ports: list[~azure.mgmt.containerservice.models.PortRange] :ivar application_security_groups: The IDs of the application security groups which agent pool will associate when created. 
:vartype application_security_groups: list[str] @@ -741,17 +985,17 @@ class AgentPoolNetworkProfile(_serialization.Model): def __init__( self, *, - node_public_ip_tags: Optional[List["_models.IPTag"]] = None, - allowed_host_ports: Optional[List["_models.PortRange"]] = None, - application_security_groups: Optional[List[str]] = None, + node_public_ip_tags: Optional[list["_models.IPTag"]] = None, + allowed_host_ports: Optional[list["_models.PortRange"]] = None, + application_security_groups: Optional[list[str]] = None, **kwargs: Any ) -> None: """ :keyword node_public_ip_tags: IPTags of instance-level public IPs. - :paramtype node_public_ip_tags: list[~azure.mgmt.containerservice.v2024_07_01.models.IPTag] + :paramtype node_public_ip_tags: list[~azure.mgmt.containerservice.models.IPTag] :keyword allowed_host_ports: The port ranges that are allowed to access. The specified ranges are allowed to overlap. - :paramtype allowed_host_ports: list[~azure.mgmt.containerservice.v2024_07_01.models.PortRange] + :paramtype allowed_host_ports: list[~azure.mgmt.containerservice.models.PortRange] :keyword application_security_groups: The IDs of the application security groups which agent pool will associate when created. :paramtype application_security_groups: list[str] @@ -773,15 +1017,24 @@ class AgentPoolSecurityProfile(_serialization.Model): signed operating systems and drivers can boot. For more details, see aka.ms/aks/trustedlaunch. If not specified, the default is false. :vartype enable_secure_boot: bool + :ivar ssh_access: SSH access method of an agent pool. Known values are: "LocalUser" and + "Disabled". 
+ :vartype ssh_access: str or ~azure.mgmt.containerservice.models.AgentPoolSSHAccess """ _attribute_map = { "enable_vtpm": {"key": "enableVTPM", "type": "bool"}, "enable_secure_boot": {"key": "enableSecureBoot", "type": "bool"}, + "ssh_access": {"key": "sshAccess", "type": "str"}, } def __init__( - self, *, enable_vtpm: Optional[bool] = None, enable_secure_boot: Optional[bool] = None, **kwargs: Any + self, + *, + enable_vtpm: Optional[bool] = None, + enable_secure_boot: Optional[bool] = None, + ssh_access: Optional[Union[str, "_models.AgentPoolSSHAccess"]] = None, + **kwargs: Any ) -> None: """ :keyword enable_vtpm: vTPM is a Trusted Launch feature for configuring a dedicated secure vault @@ -792,10 +1045,38 @@ def __init__( signed operating systems and drivers can boot. For more details, see aka.ms/aks/trustedlaunch. If not specified, the default is false. :paramtype enable_secure_boot: bool + :keyword ssh_access: SSH access method of an agent pool. Known values are: "LocalUser" and + "Disabled". + :paramtype ssh_access: str or ~azure.mgmt.containerservice.models.AgentPoolSSHAccess """ super().__init__(**kwargs) self.enable_vtpm = enable_vtpm self.enable_secure_boot = enable_secure_boot + self.ssh_access = ssh_access + + +class AgentPoolStatus(_serialization.Model): + """Contains read-only information about the Agent Pool. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar provisioning_error: The error detail information of the agent pool. Preserves the + detailed info of failure. If there was no error, this field is omitted. 
+ :vartype provisioning_error: ~azure.mgmt.containerservice.models.ErrorDetail + """ + + _validation = { + "provisioning_error": {"readonly": True}, + } + + _attribute_map = { + "provisioning_error": {"key": "provisioningError", "type": "ErrorDetail"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.provisioning_error: Optional["_models.ErrorDetail"] = None class AgentPoolUpgradeProfile(_serialization.Model): @@ -815,10 +1096,10 @@ class AgentPoolUpgradeProfile(_serialization.Model): :vartype kubernetes_version: str :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType :ivar upgrades: List of orchestrator types and versions available for upgrade. :vartype upgrades: - list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeProfilePropertiesUpgradesItem] + list[~azure.mgmt.containerservice.models.AgentPoolUpgradeProfilePropertiesUpgradesItem] :ivar latest_node_image_version: The latest AKS supported node image version. :vartype latest_node_image_version: str """ @@ -846,7 +1127,7 @@ def __init__( *, kubernetes_version: str, os_type: Union[str, "_models.OSType"] = "Linux", - upgrades: Optional[List["_models.AgentPoolUpgradeProfilePropertiesUpgradesItem"]] = None, + upgrades: Optional[list["_models.AgentPoolUpgradeProfilePropertiesUpgradesItem"]] = None, latest_node_image_version: Optional[str] = None, **kwargs: Any ) -> None: @@ -855,17 +1136,17 @@ def __init__( :paramtype kubernetes_version: str :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". 
- :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType :keyword upgrades: List of orchestrator types and versions available for upgrade. :paramtype upgrades: - list[~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeProfilePropertiesUpgradesItem] + list[~azure.mgmt.containerservice.models.AgentPoolUpgradeProfilePropertiesUpgradesItem] :keyword latest_node_image_version: The latest AKS supported node image version. :paramtype latest_node_image_version: str """ super().__init__(**kwargs) - self.id = None - self.name = None - self.type = None + self.id: Optional[str] = None + self.name: Optional[str] = None + self.type: Optional[str] = None self.kubernetes_version = kubernetes_version self.os_type = os_type self.upgrades = upgrades @@ -903,21 +1184,36 @@ def __init__( class AgentPoolUpgradeSettings(_serialization.Model): """Settings for upgrading an agentpool. - :ivar max_surge: This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). - If a percentage is specified, it is the percentage of the total agent pool size at the time of - the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is - 1. For more information, including best practices, see: - https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade. + :ivar max_surge: The maximum number or percentage of nodes that are surged during upgrade. This + can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is + specified, it is the percentage of the total agent pool size at the time of the upgrade. For + percentages, fractional nodes are rounded up. If not specified, the default is 10%. For more + information, including best practices, see: + https://learn.microsoft.com/en-us/azure/aks/upgrade-cluster. 
:vartype max_surge: str - :ivar drain_timeout_in_minutes: The amount of time (in minutes) to wait on eviction of pods and - graceful termination per node. This eviction wait time honors waiting on pod disruption - budgets. If this time is exceeded, the upgrade fails. If not specified, the default is 30 - minutes. + :ivar max_unavailable: The maximum number or percentage of nodes that can be simultaneously + unavailable during upgrade. This can either be set to an integer (e.g. '1') or a percentage + (e.g. '5%'). If a percentage is specified, it is the percentage of the total agent pool size at + the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, + the default is 0. For more information, including best practices, see: + https://learn.microsoft.com/en-us/azure/aks/upgrade-cluster. + :vartype max_unavailable: str + :ivar drain_timeout_in_minutes: The drain timeout for a node. The amount of time (in minutes) + to wait on eviction of pods and graceful termination per node. This eviction wait time honors + waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. If not + specified, the default is 30 minutes. :vartype drain_timeout_in_minutes: int - :ivar node_soak_duration_in_minutes: The amount of time (in minutes) to wait after draining a - node and before reimaging it and moving on to next node. If not specified, the default is 0 - minutes. + :ivar node_soak_duration_in_minutes: The soak duration for a node. The amount of time (in + minutes) to wait after draining a node and before reimaging it and moving on to next node. If + not specified, the default is 0 minutes. :vartype node_soak_duration_in_minutes: int + :ivar undrainable_node_behavior: Defines the behavior for undrainable nodes during upgrade. 
The + most common cause of undrainable nodes is Pod Disruption Budgets (PDBs), but other issues, such + as pod termination grace period is exceeding the remaining per-node drain timeout or pod is + still being in a running state, can also cause undrainable nodes. Known values are: "Cordon" + and "Schedule". + :vartype undrainable_node_behavior: str or + ~azure.mgmt.containerservice.models.UndrainableNodeBehavior """ _validation = { @@ -927,47 +1223,68 @@ class AgentPoolUpgradeSettings(_serialization.Model): _attribute_map = { "max_surge": {"key": "maxSurge", "type": "str"}, + "max_unavailable": {"key": "maxUnavailable", "type": "str"}, "drain_timeout_in_minutes": {"key": "drainTimeoutInMinutes", "type": "int"}, "node_soak_duration_in_minutes": {"key": "nodeSoakDurationInMinutes", "type": "int"}, + "undrainable_node_behavior": {"key": "undrainableNodeBehavior", "type": "str"}, } def __init__( self, *, max_surge: Optional[str] = None, + max_unavailable: Optional[str] = None, drain_timeout_in_minutes: Optional[int] = None, node_soak_duration_in_minutes: Optional[int] = None, + undrainable_node_behavior: Optional[Union[str, "_models.UndrainableNodeBehavior"]] = None, **kwargs: Any ) -> None: """ - :keyword max_surge: This can either be set to an integer (e.g. '5') or a percentage (e.g. - '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the - time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the - default is 1. For more information, including best practices, see: - https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade. + :keyword max_surge: The maximum number or percentage of nodes that are surged during upgrade. + This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage + is specified, it is the percentage of the total agent pool size at the time of the upgrade. For + percentages, fractional nodes are rounded up. 
If not specified, the default is 10%. For more + information, including best practices, see: + https://learn.microsoft.com/en-us/azure/aks/upgrade-cluster. :paramtype max_surge: str - :keyword drain_timeout_in_minutes: The amount of time (in minutes) to wait on eviction of pods - and graceful termination per node. This eviction wait time honors waiting on pod disruption - budgets. If this time is exceeded, the upgrade fails. If not specified, the default is 30 - minutes. + :keyword max_unavailable: The maximum number or percentage of nodes that can be simultaneously + unavailable during upgrade. This can either be set to an integer (e.g. '1') or a percentage + (e.g. '5%'). If a percentage is specified, it is the percentage of the total agent pool size at + the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, + the default is 0. For more information, including best practices, see: + https://learn.microsoft.com/en-us/azure/aks/upgrade-cluster. + :paramtype max_unavailable: str + :keyword drain_timeout_in_minutes: The drain timeout for a node. The amount of time (in + minutes) to wait on eviction of pods and graceful termination per node. This eviction wait time + honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. If not + specified, the default is 30 minutes. :paramtype drain_timeout_in_minutes: int - :keyword node_soak_duration_in_minutes: The amount of time (in minutes) to wait after draining - a node and before reimaging it and moving on to next node. If not specified, the default is 0 - minutes. + :keyword node_soak_duration_in_minutes: The soak duration for a node. The amount of time (in + minutes) to wait after draining a node and before reimaging it and moving on to next node. If + not specified, the default is 0 minutes. :paramtype node_soak_duration_in_minutes: int + :keyword undrainable_node_behavior: Defines the behavior for undrainable nodes during upgrade. 
+ The most common cause of undrainable nodes is Pod Disruption Budgets (PDBs), but other issues, + such as pod termination grace period is exceeding the remaining per-node drain timeout or pod + is still being in a running state, can also cause undrainable nodes. Known values are: "Cordon" + and "Schedule". + :paramtype undrainable_node_behavior: str or + ~azure.mgmt.containerservice.models.UndrainableNodeBehavior """ super().__init__(**kwargs) self.max_surge = max_surge + self.max_unavailable = max_unavailable self.drain_timeout_in_minutes = drain_timeout_in_minutes self.node_soak_duration_in_minutes = node_soak_duration_in_minutes + self.undrainable_node_behavior = undrainable_node_behavior class AgentPoolWindowsProfile(_serialization.Model): """The Windows agent pool's specific profile. - :ivar disable_outbound_nat: The default value is false. Outbound NAT can only be disabled if - the cluster outboundType is NAT Gateway and the Windows agent pool does not have node public IP - enabled. + :ivar disable_outbound_nat: Whether to disable OutboundNAT in windows nodes. The default value + is false. Outbound NAT can only be disabled if the cluster outboundType is NAT Gateway and the + Windows agent pool does not have node public IP enabled. :vartype disable_outbound_nat: bool """ @@ -977,9 +1294,9 @@ class AgentPoolWindowsProfile(_serialization.Model): def __init__(self, *, disable_outbound_nat: Optional[bool] = None, **kwargs: Any) -> None: """ - :keyword disable_outbound_nat: The default value is false. Outbound NAT can only be disabled if - the cluster outboundType is NAT Gateway and the Windows agent pool does not have node public IP - enabled. + :keyword disable_outbound_nat: Whether to disable OutboundNAT in windows nodes. The default + value is false. Outbound NAT can only be disabled if the cluster outboundType is NAT Gateway + and the Windows agent pool does not have node public IP enabled. 
:paramtype disable_outbound_nat: bool """ super().__init__(**kwargs) @@ -992,17 +1309,18 @@ class AzureKeyVaultKms(_serialization.Model): :ivar enabled: Whether to enable Azure Key Vault key management service. The default is false. :vartype enabled: bool :ivar key_id: Identifier of Azure Key Vault key. See `key identifier format - `_ # pylint: disable=line-too-long + `_ for more details. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier. When Azure Key Vault key management service is disabled, leave the field empty. :vartype key_id: str - :ivar key_vault_network_access: Network access of key vault. The possible values are ``Public`` - and ``Private``. ``Public`` means the key vault allows public access from all networks. - ``Private`` means the key vault disables public access and enables private link. The default - value is ``Public``. Known values are: "Public" and "Private". + :ivar key_vault_network_access: Network access of the key vault. Network access of key vault. + The possible values are ``Public`` and ``Private``. ``Public`` means the key vault allows + public access from all networks. ``Private`` means the key vault disables public access and + enables private link. The default value is ``Public``. Known values are: "Public" and + "Private". :vartype key_vault_network_access: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KeyVaultNetworkAccessTypes + ~azure.mgmt.containerservice.models.KeyVaultNetworkAccessTypes :ivar key_vault_resource_id: Resource ID of key vault. When keyVaultNetworkAccess is ``Private``\\ , this field is required and must be a valid resource ID. When keyVaultNetworkAccess is ``Public``\\ , leave the field empty. @@ -1030,17 +1348,18 @@ def __init__( false. :paramtype enabled: bool :keyword key_id: Identifier of Azure Key Vault key. See `key identifier format - `_ # pylint: disable=line-too-long + `_ for more details. 
When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier. When Azure Key Vault key management service is disabled, leave the field empty. :paramtype key_id: str - :keyword key_vault_network_access: Network access of key vault. The possible values are - ``Public`` and ``Private``. ``Public`` means the key vault allows public access from all - networks. ``Private`` means the key vault disables public access and enables private link. The - default value is ``Public``. Known values are: "Public" and "Private". + :keyword key_vault_network_access: Network access of the key vault. Network access of key + vault. The possible values are ``Public`` and ``Private``. ``Public`` means the key vault + allows public access from all networks. ``Private`` means the key vault disables public access + and enables private link. The default value is ``Public``. Known values are: "Public" and + "Private". :paramtype key_vault_network_access: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KeyVaultNetworkAccessTypes + ~azure.mgmt.containerservice.models.KeyVaultNetworkAccessTypes :keyword key_vault_resource_id: Resource ID of key vault. When keyVaultNetworkAccess is ``Private``\\ , this field is required and must be a valid resource ID. When keyVaultNetworkAccess is ``Public``\\ , leave the field empty. @@ -1053,64 +1372,11 @@ def __init__( self.key_vault_resource_id = key_vault_resource_id -class CloudErrorBody(_serialization.Model): - """An error response from the Container service. - - :ivar code: An identifier for the error. Codes are invariant and are intended to be consumed - programmatically. - :vartype code: str - :ivar message: A message describing the error, intended to be suitable for display in a user - interface. - :vartype message: str - :ivar target: The target of the particular error. For example, the name of the property in - error. 
- :vartype target: str - :ivar details: A list of additional details about the error. - :vartype details: list[~azure.mgmt.containerservice.v2024_07_01.models.CloudErrorBody] - """ - - _attribute_map = { - "code": {"key": "code", "type": "str"}, - "message": {"key": "message", "type": "str"}, - "target": {"key": "target", "type": "str"}, - "details": {"key": "details", "type": "[CloudErrorBody]"}, - } - - def __init__( - self, - *, - code: Optional[str] = None, - message: Optional[str] = None, - target: Optional[str] = None, - details: Optional[List["_models.CloudErrorBody"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword code: An identifier for the error. Codes are invariant and are intended to be consumed - programmatically. - :paramtype code: str - :keyword message: A message describing the error, intended to be suitable for display in a user - interface. - :paramtype message: str - :keyword target: The target of the particular error. For example, the name of the property in - error. - :paramtype target: str - :keyword details: A list of additional details about the error. - :paramtype details: list[~azure.mgmt.containerservice.v2024_07_01.models.CloudErrorBody] - """ - super().__init__(**kwargs) - self.code = code - self.message = message - self.target = target - self.details = details - - class ClusterUpgradeSettings(_serialization.Model): """Settings for upgrading a cluster. :ivar override_settings: Settings for overrides. - :vartype override_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.UpgradeOverrideSettings + :vartype override_settings: ~azure.mgmt.containerservice.models.UpgradeOverrideSettings """ _attribute_map = { @@ -1120,8 +1386,7 @@ class ClusterUpgradeSettings(_serialization.Model): def __init__(self, *, override_settings: Optional["_models.UpgradeOverrideSettings"] = None, **kwargs: Any) -> None: """ :keyword override_settings: Settings for overrides. 
- :paramtype override_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.UpgradeOverrideSettings + :paramtype override_settings: ~azure.mgmt.containerservice.models.UpgradeOverrideSettings """ super().__init__(**kwargs) self.override_settings = override_settings @@ -1141,7 +1406,7 @@ class CompatibleVersions(_serialization.Model): "versions": {"key": "versions", "type": "[str]"}, } - def __init__(self, *, name: Optional[str] = None, versions: Optional[List[str]] = None, **kwargs: Any) -> None: + def __init__(self, *, name: Optional[str] = None, versions: Optional[list[str]] = None, **kwargs: Any) -> None: """ :keyword name: The product/service name. :paramtype name: str @@ -1161,7 +1426,7 @@ class ContainerServiceLinuxProfile(_serialization.Model): :ivar admin_username: The administrator username to use for Linux VMs. Required. :vartype admin_username: str :ivar ssh: The SSH configuration for Linux-based VMs running on Azure. Required. - :vartype ssh: ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceSshConfiguration + :vartype ssh: ~azure.mgmt.containerservice.models.ContainerServiceSshConfiguration """ _validation = { @@ -1179,33 +1444,33 @@ def __init__(self, *, admin_username: str, ssh: "_models.ContainerServiceSshConf :keyword admin_username: The administrator username to use for Linux VMs. Required. :paramtype admin_username: str :keyword ssh: The SSH configuration for Linux-based VMs running on Azure. Required. - :paramtype ssh: - ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceSshConfiguration + :paramtype ssh: ~azure.mgmt.containerservice.models.ContainerServiceSshConfiguration """ super().__init__(**kwargs) self.admin_username = admin_username self.ssh = ssh -class ContainerServiceNetworkProfile(_serialization.Model): # pylint: disable=too-many-instance-attributes +class ContainerServiceNetworkProfile(_serialization.Model): """Profile of network configuration. 
:ivar network_plugin: Network plugin used for building the Kubernetes network. Known values are: "azure", "kubenet", and "none". - :vartype network_plugin: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPlugin + :vartype network_plugin: str or ~azure.mgmt.containerservice.models.NetworkPlugin :ivar network_plugin_mode: The mode the network plugin should use. "overlay" - :vartype network_plugin_mode: str or - ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPluginMode + :vartype network_plugin_mode: str or ~azure.mgmt.containerservice.models.NetworkPluginMode :ivar network_policy: Network policy used for building the Kubernetes network. Known values are: "none", "calico", "azure", and "cilium". - :vartype network_policy: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPolicy - :ivar network_mode: This cannot be specified if networkPlugin is anything other than 'azure'. - Known values are: "transparent" and "bridge". - :vartype network_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkMode + :vartype network_policy: str or ~azure.mgmt.containerservice.models.NetworkPolicy + :ivar network_mode: The network mode Azure CNI is configured with. This cannot be specified if + networkPlugin is anything other than 'azure'. Known values are: "transparent" and "bridge". + :vartype network_mode: str or ~azure.mgmt.containerservice.models.NetworkMode :ivar network_dataplane: Network dataplane used in the Kubernetes cluster. Known values are: "azure" and "cilium". - :vartype network_dataplane: str or - ~azure.mgmt.containerservice.v2024_07_01.models.NetworkDataplane + :vartype network_dataplane: str or ~azure.mgmt.containerservice.models.NetworkDataplane + :ivar advanced_networking: Advanced Networking profile for enabling observability and security + feature suite on a cluster. For more information see aka.ms/aksadvancednetworking. 
+ :vartype advanced_networking: ~azure.mgmt.containerservice.models.AdvancedNetworking :ivar pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used. :vartype pod_cidr: str :ivar service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must @@ -1214,33 +1479,39 @@ class ContainerServiceNetworkProfile(_serialization.Model): # pylint: disable=t :ivar dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr. :vartype dns_service_ip: str - :ivar outbound_type: This can only be set at cluster creation time and cannot be changed later. - For more information see `egress outbound type + :ivar outbound_type: The outbound (egress) routing method. This can only be set at cluster + creation time and cannot be changed later. For more information see `egress outbound type `_. Known values are: "loadBalancer", - "userDefinedRouting", "managedNATGateway", and "userAssignedNATGateway". - :vartype outbound_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OutboundType - :ivar load_balancer_sku: The default is 'standard'. See `Azure Load Balancer SKUs + "userDefinedRouting", "managedNATGateway", "userAssignedNATGateway", and "none". + :vartype outbound_type: str or ~azure.mgmt.containerservice.models.OutboundType + :ivar load_balancer_sku: The load balancer sku for the managed cluster. The default is + 'standard'. See `Azure Load Balancer SKUs `_ for more information about the differences between load balancer SKUs. Known values are: "standard" and "basic". - :vartype load_balancer_sku: str or - ~azure.mgmt.containerservice.v2024_07_01.models.LoadBalancerSku + :vartype load_balancer_sku: str or ~azure.mgmt.containerservice.models.LoadBalancerSku :ivar load_balancer_profile: Profile of the cluster load balancer. 
:vartype load_balancer_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfile + ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfile :ivar nat_gateway_profile: Profile of the cluster NAT gateway. :vartype nat_gateway_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterNATGatewayProfile - :ivar pod_cidrs: One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for each - IP family (IPv4/IPv6), is expected for dual-stack networking. + ~azure.mgmt.containerservice.models.ManagedClusterNATGatewayProfile + :ivar static_egress_gateway_profile: The profile for Static Egress Gateway addon. For more + details about Static Egress Gateway, see https://aka.ms/aks/static-egress-gateway. + :vartype static_egress_gateway_profile: + ~azure.mgmt.containerservice.models.ManagedClusterStaticEgressGatewayProfile + :ivar pod_cidrs: The CIDR notation IP ranges from which to assign pod IPs. One IPv4 CIDR is + expected for single-stack networking. Two CIDRs, one for each IP family (IPv4/IPv6), is + expected for dual-stack networking. :vartype pod_cidrs: list[str] - :ivar service_cidrs: One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for - each IP family (IPv4/IPv6), is expected for dual-stack networking. They must not overlap with - any Subnet IP ranges. + :ivar service_cidrs: The CIDR notation IP ranges from which to assign service cluster IPs. One + IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for each IP family + (IPv4/IPv6), is expected for dual-stack networking. They must not overlap with any Subnet IP + ranges. :vartype service_cidrs: list[str] - :ivar ip_families: IP families are used to determine single-stack or dual-stack clusters. For - single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and - IPv6. 
- :vartype ip_families: list[str or ~azure.mgmt.containerservice.v2024_07_01.models.IpFamily] + :ivar ip_families: The IP families used to specify IP versions available to the cluster. IP + families are used to determine single-stack or dual-stack clusters. For single-stack, the + expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6. + :vartype ip_families: list[str or ~azure.mgmt.containerservice.models.IpFamily] """ _validation = { @@ -1257,6 +1528,7 @@ class ContainerServiceNetworkProfile(_serialization.Model): # pylint: disable=t "network_policy": {"key": "networkPolicy", "type": "str"}, "network_mode": {"key": "networkMode", "type": "str"}, "network_dataplane": {"key": "networkDataplane", "type": "str"}, + "advanced_networking": {"key": "advancedNetworking", "type": "AdvancedNetworking"}, "pod_cidr": {"key": "podCidr", "type": "str"}, "service_cidr": {"key": "serviceCidr", "type": "str"}, "dns_service_ip": {"key": "dnsServiceIP", "type": "str"}, @@ -1264,6 +1536,10 @@ class ContainerServiceNetworkProfile(_serialization.Model): # pylint: disable=t "load_balancer_sku": {"key": "loadBalancerSku", "type": "str"}, "load_balancer_profile": {"key": "loadBalancerProfile", "type": "ManagedClusterLoadBalancerProfile"}, "nat_gateway_profile": {"key": "natGatewayProfile", "type": "ManagedClusterNATGatewayProfile"}, + "static_egress_gateway_profile": { + "key": "staticEgressGatewayProfile", + "type": "ManagedClusterStaticEgressGatewayProfile", + }, "pod_cidrs": {"key": "podCidrs", "type": "[str]"}, "service_cidrs": {"key": "serviceCidrs", "type": "[str]"}, "ip_families": {"key": "ipFamilies", "type": "[str]"}, @@ -1272,11 +1548,12 @@ class ContainerServiceNetworkProfile(_serialization.Model): # pylint: disable=t def __init__( self, *, - network_plugin: Union[str, "_models.NetworkPlugin"] = "kubenet", + network_plugin: Optional[Union[str, "_models.NetworkPlugin"]] = None, network_plugin_mode: Optional[Union[str, "_models.NetworkPluginMode"]] = None, 
network_policy: Optional[Union[str, "_models.NetworkPolicy"]] = None, network_mode: Optional[Union[str, "_models.NetworkMode"]] = None, network_dataplane: Optional[Union[str, "_models.NetworkDataplane"]] = None, + advanced_networking: Optional["_models.AdvancedNetworking"] = None, pod_cidr: str = "10.244.0.0/16", service_cidr: str = "10.0.0.0/16", dns_service_ip: str = "10.0.0.10", @@ -1284,28 +1561,30 @@ def __init__( load_balancer_sku: Optional[Union[str, "_models.LoadBalancerSku"]] = None, load_balancer_profile: Optional["_models.ManagedClusterLoadBalancerProfile"] = None, nat_gateway_profile: Optional["_models.ManagedClusterNATGatewayProfile"] = None, - pod_cidrs: Optional[List[str]] = None, - service_cidrs: Optional[List[str]] = None, - ip_families: Optional[List[Union[str, "_models.IpFamily"]]] = None, + static_egress_gateway_profile: Optional["_models.ManagedClusterStaticEgressGatewayProfile"] = None, + pod_cidrs: Optional[list[str]] = None, + service_cidrs: Optional[list[str]] = None, + ip_families: Optional[list[Union[str, "_models.IpFamily"]]] = None, **kwargs: Any ) -> None: """ :keyword network_plugin: Network plugin used for building the Kubernetes network. Known values are: "azure", "kubenet", and "none". - :paramtype network_plugin: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPlugin + :paramtype network_plugin: str or ~azure.mgmt.containerservice.models.NetworkPlugin :keyword network_plugin_mode: The mode the network plugin should use. "overlay" - :paramtype network_plugin_mode: str or - ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPluginMode + :paramtype network_plugin_mode: str or ~azure.mgmt.containerservice.models.NetworkPluginMode :keyword network_policy: Network policy used for building the Kubernetes network. Known values are: "none", "calico", "azure", and "cilium". 
- :paramtype network_policy: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkPolicy - :keyword network_mode: This cannot be specified if networkPlugin is anything other than - 'azure'. Known values are: "transparent" and "bridge". - :paramtype network_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.NetworkMode + :paramtype network_policy: str or ~azure.mgmt.containerservice.models.NetworkPolicy + :keyword network_mode: The network mode Azure CNI is configured with. This cannot be specified + if networkPlugin is anything other than 'azure'. Known values are: "transparent" and "bridge". + :paramtype network_mode: str or ~azure.mgmt.containerservice.models.NetworkMode :keyword network_dataplane: Network dataplane used in the Kubernetes cluster. Known values are: "azure" and "cilium". - :paramtype network_dataplane: str or - ~azure.mgmt.containerservice.v2024_07_01.models.NetworkDataplane + :paramtype network_dataplane: str or ~azure.mgmt.containerservice.models.NetworkDataplane + :keyword advanced_networking: Advanced Networking profile for enabling observability and + security feature suite on a cluster. For more information see aka.ms/aksadvancednetworking. + :paramtype advanced_networking: ~azure.mgmt.containerservice.models.AdvancedNetworking :keyword pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used. :paramtype pod_cidr: str :keyword service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It @@ -1314,33 +1593,39 @@ def __init__( :keyword dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr. :paramtype dns_service_ip: str - :keyword outbound_type: This can only be set at cluster creation time and cannot be changed - later. For more information see `egress outbound type + :keyword outbound_type: The outbound (egress) routing method. 
This can only be set at cluster + creation time and cannot be changed later. For more information see `egress outbound type `_. Known values are: "loadBalancer", - "userDefinedRouting", "managedNATGateway", and "userAssignedNATGateway". - :paramtype outbound_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OutboundType - :keyword load_balancer_sku: The default is 'standard'. See `Azure Load Balancer SKUs + "userDefinedRouting", "managedNATGateway", "userAssignedNATGateway", and "none". + :paramtype outbound_type: str or ~azure.mgmt.containerservice.models.OutboundType + :keyword load_balancer_sku: The load balancer sku for the managed cluster. The default is + 'standard'. See `Azure Load Balancer SKUs `_ for more information about the differences between load balancer SKUs. Known values are: "standard" and "basic". - :paramtype load_balancer_sku: str or - ~azure.mgmt.containerservice.v2024_07_01.models.LoadBalancerSku + :paramtype load_balancer_sku: str or ~azure.mgmt.containerservice.models.LoadBalancerSku :keyword load_balancer_profile: Profile of the cluster load balancer. :paramtype load_balancer_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfile + ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfile :keyword nat_gateway_profile: Profile of the cluster NAT gateway. :paramtype nat_gateway_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterNATGatewayProfile - :keyword pod_cidrs: One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for - each IP family (IPv4/IPv6), is expected for dual-stack networking. + ~azure.mgmt.containerservice.models.ManagedClusterNATGatewayProfile + :keyword static_egress_gateway_profile: The profile for Static Egress Gateway addon. For more + details about Static Egress Gateway, see https://aka.ms/aks/static-egress-gateway. 
+ :paramtype static_egress_gateway_profile: + ~azure.mgmt.containerservice.models.ManagedClusterStaticEgressGatewayProfile + :keyword pod_cidrs: The CIDR notation IP ranges from which to assign pod IPs. One IPv4 CIDR is + expected for single-stack networking. Two CIDRs, one for each IP family (IPv4/IPv6), is + expected for dual-stack networking. :paramtype pod_cidrs: list[str] - :keyword service_cidrs: One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one - for each IP family (IPv4/IPv6), is expected for dual-stack networking. They must not overlap - with any Subnet IP ranges. + :keyword service_cidrs: The CIDR notation IP ranges from which to assign service cluster IPs. + One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for each IP family + (IPv4/IPv6), is expected for dual-stack networking. They must not overlap with any Subnet IP + ranges. :paramtype service_cidrs: list[str] - :keyword ip_families: IP families are used to determine single-stack or dual-stack clusters. - For single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and - IPv6. - :paramtype ip_families: list[str or ~azure.mgmt.containerservice.v2024_07_01.models.IpFamily] + :keyword ip_families: The IP families used to specify IP versions available to the cluster. IP + families are used to determine single-stack or dual-stack clusters. For single-stack, the + expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6. 
+ :paramtype ip_families: list[str or ~azure.mgmt.containerservice.models.IpFamily] """ super().__init__(**kwargs) self.network_plugin = network_plugin @@ -1348,6 +1633,7 @@ def __init__( self.network_policy = network_policy self.network_mode = network_mode self.network_dataplane = network_dataplane + self.advanced_networking = advanced_networking self.pod_cidr = pod_cidr self.service_cidr = service_cidr self.dns_service_ip = dns_service_ip @@ -1355,6 +1641,7 @@ def __init__( self.load_balancer_sku = load_balancer_sku self.load_balancer_profile = load_balancer_profile self.nat_gateway_profile = nat_gateway_profile + self.static_egress_gateway_profile = static_egress_gateway_profile self.pod_cidrs = pod_cidrs self.service_cidrs = service_cidrs self.ip_families = ip_families @@ -1367,8 +1654,7 @@ class ContainerServiceSshConfiguration(_serialization.Model): :ivar public_keys: The list of SSH public keys used to authenticate with Linux-based VMs. A maximum of 1 key may be specified. Required. - :vartype public_keys: - list[~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceSshPublicKey] + :vartype public_keys: list[~azure.mgmt.containerservice.models.ContainerServiceSshPublicKey] """ _validation = { @@ -1379,12 +1665,11 @@ class ContainerServiceSshConfiguration(_serialization.Model): "public_keys": {"key": "publicKeys", "type": "[ContainerServiceSshPublicKey]"}, } - def __init__(self, *, public_keys: List["_models.ContainerServiceSshPublicKey"], **kwargs: Any) -> None: + def __init__(self, *, public_keys: list["_models.ContainerServiceSshPublicKey"], **kwargs: Any) -> None: """ :keyword public_keys: The list of SSH public keys used to authenticate with Linux-based VMs. A maximum of 1 key may be specified. Required. 
- :paramtype public_keys: - list[~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceSshPublicKey] + :paramtype public_keys: list[~azure.mgmt.containerservice.models.ContainerServiceSshPublicKey] """ super().__init__(**kwargs) self.public_keys = public_keys @@ -1464,8 +1749,8 @@ class CredentialResult(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.name = None - self.value = None + self.name: Optional[str] = None + self.value: Optional[bytes] = None class CredentialResults(_serialization.Model): @@ -1474,7 +1759,7 @@ class CredentialResults(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar kubeconfigs: Base64-encoded Kubernetes configuration file. - :vartype kubeconfigs: list[~azure.mgmt.containerservice.v2024_07_01.models.CredentialResult] + :vartype kubeconfigs: list[~azure.mgmt.containerservice.models.CredentialResult] """ _validation = { @@ -1488,7 +1773,7 @@ class CredentialResults(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.kubeconfigs = None + self.kubeconfigs: Optional[list["_models.CredentialResult"]] = None class DailySchedule(_serialization.Model): @@ -1518,7 +1803,7 @@ def __init__(self, *, interval_days: int, **kwargs: Any) -> None: class DateSpan(_serialization.Model): - """For example, between '2022-12-23' and '2023-01-05'. + """A date range. For example, between '2022-12-23' and '2023-01-05'. All required parameters must be populated in order to send to server. @@ -1604,7 +1889,7 @@ class EndpointDependency(_serialization.Model): :ivar domain_name: The domain name of the dependency. :vartype domain_name: str :ivar endpoint_details: The Ports and Protocols used when connecting to domainName. 
- :vartype endpoint_details: list[~azure.mgmt.containerservice.v2024_07_01.models.EndpointDetail] + :vartype endpoint_details: list[~azure.mgmt.containerservice.models.EndpointDetail] """ _attribute_map = { @@ -1616,15 +1901,14 @@ def __init__( self, *, domain_name: Optional[str] = None, - endpoint_details: Optional[List["_models.EndpointDetail"]] = None, + endpoint_details: Optional[list["_models.EndpointDetail"]] = None, **kwargs: Any ) -> None: """ :keyword domain_name: The domain name of the dependency. :paramtype domain_name: str :keyword endpoint_details: The Ports and Protocols used when connecting to domainName. - :paramtype endpoint_details: - list[~azure.mgmt.containerservice.v2024_07_01.models.EndpointDetail] + :paramtype endpoint_details: list[~azure.mgmt.containerservice.models.EndpointDetail] """ super().__init__(**kwargs) self.domain_name = domain_name @@ -1701,8 +1985,8 @@ class ErrorAdditionalInfo(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.type = None - self.info = None + self.type: Optional[str] = None + self.info: Optional[JSON] = None class ErrorDetail(_serialization.Model): @@ -1717,10 +2001,9 @@ class ErrorDetail(_serialization.Model): :ivar target: The error target. :vartype target: str :ivar details: The error details. - :vartype details: list[~azure.mgmt.containerservice.v2024_07_01.models.ErrorDetail] + :vartype details: list[~azure.mgmt.containerservice.models.ErrorDetail] :ivar additional_info: The error additional info. 
- :vartype additional_info: - list[~azure.mgmt.containerservice.v2024_07_01.models.ErrorAdditionalInfo] + :vartype additional_info: list[~azure.mgmt.containerservice.models.ErrorAdditionalInfo] """ _validation = { @@ -1742,11 +2025,11 @@ class ErrorDetail(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.code = None - self.message = None - self.target = None - self.details = None - self.additional_info = None + self.code: Optional[str] = None + self.message: Optional[str] = None + self.target: Optional[str] = None + self.details: Optional[list["_models.ErrorDetail"]] = None + self.additional_info: Optional[list["_models.ErrorAdditionalInfo"]] = None class ErrorResponse(_serialization.Model): @@ -1754,7 +2037,7 @@ class ErrorResponse(_serialization.Model): operations. (This also follows the OData error response format.). :ivar error: The error object. - :vartype error: ~azure.mgmt.containerservice.v2024_07_01.models.ErrorDetail + :vartype error: ~azure.mgmt.containerservice.models.ErrorDetail """ _attribute_map = { @@ -1764,7 +2047,7 @@ class ErrorResponse(_serialization.Model): def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None: """ :keyword error: The error object. - :paramtype error: ~azure.mgmt.containerservice.v2024_07_01.models.ErrorDetail + :paramtype error: ~azure.mgmt.containerservice.models.ErrorDetail """ super().__init__(**kwargs) self.error = error @@ -1776,7 +2059,7 @@ class ExtendedLocation(_serialization.Model): :ivar name: The name of the extended location. :vartype name: str :ivar type: The type of the extended location. "EdgeZone" - :vartype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.ExtendedLocationTypes + :vartype type: str or ~azure.mgmt.containerservice.models.ExtendedLocationTypes """ _attribute_map = { @@ -1795,13 +2078,35 @@ def __init__( :keyword name: The name of the extended location. 
:paramtype name: str :keyword type: The type of the extended location. "EdgeZone" - :paramtype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.ExtendedLocationTypes + :paramtype type: str or ~azure.mgmt.containerservice.models.ExtendedLocationTypes """ super().__init__(**kwargs) self.name = name self.type = type +class GPUProfile(_serialization.Model): + """GPU settings for the Agent Pool. + + :ivar driver: Whether to install GPU drivers. When it's not specified, default is Install. + Known values are: "Install" and "None". + :vartype driver: str or ~azure.mgmt.containerservice.models.GPUDriver + """ + + _attribute_map = { + "driver": {"key": "driver", "type": "str"}, + } + + def __init__(self, *, driver: Optional[Union[str, "_models.GPUDriver"]] = None, **kwargs: Any) -> None: + """ + :keyword driver: Whether to install GPU drivers. When it's not specified, default is Install. + Known values are: "Install" and "None". + :paramtype driver: str or ~azure.mgmt.containerservice.models.GPUDriver + """ + super().__init__(**kwargs) + self.driver = driver + + class IPTag(_serialization.Model): """Contains the IPTag associated with the object. @@ -1833,8 +2138,7 @@ class IstioCertificateAuthority(_serialization.Model): certificates as described here https://aka.ms/asm-plugin-ca. :ivar plugin: Plugin certificates information for Service Mesh. - :vartype plugin: - ~azure.mgmt.containerservice.v2024_07_01.models.IstioPluginCertificateAuthority + :vartype plugin: ~azure.mgmt.containerservice.models.IstioPluginCertificateAuthority """ _attribute_map = { @@ -1844,8 +2148,7 @@ class IstioCertificateAuthority(_serialization.Model): def __init__(self, *, plugin: Optional["_models.IstioPluginCertificateAuthority"] = None, **kwargs: Any) -> None: """ :keyword plugin: Plugin certificates information for Service Mesh. 
- :paramtype plugin: - ~azure.mgmt.containerservice.v2024_07_01.models.IstioPluginCertificateAuthority + :paramtype plugin: ~azure.mgmt.containerservice.models.IstioPluginCertificateAuthority """ super().__init__(**kwargs) self.plugin = plugin @@ -1855,11 +2158,9 @@ class IstioComponents(_serialization.Model): """Istio components configuration. :ivar ingress_gateways: Istio ingress gateways. - :vartype ingress_gateways: - list[~azure.mgmt.containerservice.v2024_07_01.models.IstioIngressGateway] + :vartype ingress_gateways: list[~azure.mgmt.containerservice.models.IstioIngressGateway] :ivar egress_gateways: Istio egress gateways. - :vartype egress_gateways: - list[~azure.mgmt.containerservice.v2024_07_01.models.IstioEgressGateway] + :vartype egress_gateways: list[~azure.mgmt.containerservice.models.IstioEgressGateway] """ _attribute_map = { @@ -1870,17 +2171,15 @@ class IstioComponents(_serialization.Model): def __init__( self, *, - ingress_gateways: Optional[List["_models.IstioIngressGateway"]] = None, - egress_gateways: Optional[List["_models.IstioEgressGateway"]] = None, + ingress_gateways: Optional[list["_models.IstioIngressGateway"]] = None, + egress_gateways: Optional[list["_models.IstioEgressGateway"]] = None, **kwargs: Any ) -> None: """ :keyword ingress_gateways: Istio ingress gateways. - :paramtype ingress_gateways: - list[~azure.mgmt.containerservice.v2024_07_01.models.IstioIngressGateway] + :paramtype ingress_gateways: list[~azure.mgmt.containerservice.models.IstioIngressGateway] :keyword egress_gateways: Istio egress gateways. - :paramtype egress_gateways: - list[~azure.mgmt.containerservice.v2024_07_01.models.IstioEgressGateway] + :paramtype egress_gateways: list[~azure.mgmt.containerservice.models.IstioEgressGateway] """ super().__init__(**kwargs) self.ingress_gateways = ingress_gateways @@ -1894,23 +2193,56 @@ class IstioEgressGateway(_serialization.Model): :ivar enabled: Whether to enable the egress gateway. Required. 
:vartype enabled: bool + :ivar name: Name of the Istio add-on egress gateway. Required. + :vartype name: str + :ivar namespace: Namespace that the Istio add-on egress gateway should be deployed in. If + unspecified, the default is aks-istio-egress. + :vartype namespace: str + :ivar gateway_configuration_name: Name of the gateway configuration custom resource for the + Istio add-on egress gateway. Must be specified when enabling the Istio egress gateway. Must be + deployed in the same namespace that the Istio egress gateway will be deployed in. + :vartype gateway_configuration_name: str """ _validation = { "enabled": {"required": True}, + "name": {"required": True, "pattern": r"[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*"}, } _attribute_map = { "enabled": {"key": "enabled", "type": "bool"}, + "name": {"key": "name", "type": "str"}, + "namespace": {"key": "namespace", "type": "str"}, + "gateway_configuration_name": {"key": "gatewayConfigurationName", "type": "str"}, } - def __init__(self, *, enabled: bool, **kwargs: Any) -> None: + def __init__( + self, + *, + enabled: bool, + name: str, + namespace: Optional[str] = None, + gateway_configuration_name: Optional[str] = None, + **kwargs: Any + ) -> None: """ :keyword enabled: Whether to enable the egress gateway. Required. :paramtype enabled: bool + :keyword name: Name of the Istio add-on egress gateway. Required. + :paramtype name: str + :keyword namespace: Namespace that the Istio add-on egress gateway should be deployed in. If + unspecified, the default is aks-istio-egress. + :paramtype namespace: str + :keyword gateway_configuration_name: Name of the gateway configuration custom resource for the + Istio add-on egress gateway. Must be specified when enabling the Istio egress gateway. Must be + deployed in the same namespace that the Istio egress gateway will be deployed in. 
+ :paramtype gateway_configuration_name: str """ super().__init__(**kwargs) self.enabled = enabled + self.name = name + self.namespace = namespace + self.gateway_configuration_name = gateway_configuration_name class IstioIngressGateway(_serialization.Model): @@ -1921,7 +2253,7 @@ class IstioIngressGateway(_serialization.Model): All required parameters must be populated in order to send to server. :ivar mode: Mode of an ingress gateway. Required. Known values are: "External" and "Internal". - :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.IstioIngressGatewayMode + :vartype mode: str or ~azure.mgmt.containerservice.models.IstioIngressGatewayMode :ivar enabled: Whether to enable the ingress gateway. Required. :vartype enabled: bool """ @@ -1940,7 +2272,7 @@ def __init__(self, *, mode: Union[str, "_models.IstioIngressGatewayMode"], enabl """ :keyword mode: Mode of an ingress gateway. Required. Known values are: "External" and "Internal". - :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.IstioIngressGatewayMode + :paramtype mode: str or ~azure.mgmt.containerservice.models.IstioIngressGatewayMode :keyword enabled: Whether to enable the ingress gateway. Required. :paramtype enabled: bool """ @@ -2006,11 +2338,10 @@ class IstioServiceMesh(_serialization.Model): """Istio service mesh configuration. :ivar components: Istio components configuration. - :vartype components: ~azure.mgmt.containerservice.v2024_07_01.models.IstioComponents + :vartype components: ~azure.mgmt.containerservice.models.IstioComponents :ivar certificate_authority: Istio Service Mesh Certificate Authority (CA) configuration. For now, we only support plugin certificates as described here https://aka.ms/asm-plugin-ca. 
- :vartype certificate_authority: - ~azure.mgmt.containerservice.v2024_07_01.models.IstioCertificateAuthority + :vartype certificate_authority: ~azure.mgmt.containerservice.models.IstioCertificateAuthority :ivar revisions: The list of revisions of the Istio control plane. When an upgrade is not in progress, this holds one value. When canary upgrade is in progress, this can only hold two consecutive values. For more information, see: @@ -2033,16 +2364,15 @@ def __init__( *, components: Optional["_models.IstioComponents"] = None, certificate_authority: Optional["_models.IstioCertificateAuthority"] = None, - revisions: Optional[List[str]] = None, + revisions: Optional[list[str]] = None, **kwargs: Any ) -> None: """ :keyword components: Istio components configuration. - :paramtype components: ~azure.mgmt.containerservice.v2024_07_01.models.IstioComponents + :paramtype components: ~azure.mgmt.containerservice.models.IstioComponents :keyword certificate_authority: Istio Service Mesh Certificate Authority (CA) configuration. For now, we only support plugin certificates as described here https://aka.ms/asm-plugin-ca. - :paramtype certificate_authority: - ~azure.mgmt.containerservice.v2024_07_01.models.IstioCertificateAuthority + :paramtype certificate_authority: ~azure.mgmt.containerservice.models.IstioCertificateAuthority :keyword revisions: The list of revisions of the Istio control plane. When an upgrade is not in progress, this holds one value. When canary upgrade is in progress, this can only hold two consecutive values. For more information, see: @@ -2055,27 +2385,30 @@ def __init__( self.revisions = revisions -class KubeletConfig(_serialization.Model): # pylint: disable=too-many-instance-attributes - """See `AKS custom node configuration +class KubeletConfig(_serialization.Model): + """Kubelet configurations of agent nodes. See `AKS custom node configuration `_ for more details. - :ivar cpu_manager_policy: The default is 'none'. 
See `Kubernetes CPU management policies + :ivar cpu_manager_policy: The CPU Manager policy to use. The default is 'none'. See `Kubernetes + CPU management policies `_ for more information. Allowed values are 'none' and 'static'. :vartype cpu_manager_policy: str - :ivar cpu_cfs_quota: The default is true. + :ivar cpu_cfs_quota: If CPU CFS quota enforcement is enabled for containers that specify CPU + limits. The default is true. :vartype cpu_cfs_quota: bool - :ivar cpu_cfs_quota_period: The default is '100ms.' Valid values are a sequence of decimal - numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported - units are 'ns', 'us', 'ms', 's', 'm', and 'h'. + :ivar cpu_cfs_quota_period: The CPU CFS quota period value. The default is '100ms.' Valid + values are a sequence of decimal numbers with an optional fraction and a unit suffix. For + example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'. :vartype cpu_cfs_quota_period: str - :ivar image_gc_high_threshold: To disable image garbage collection, set to 100. The default is - 85%. + :ivar image_gc_high_threshold: The percent of disk usage after which image garbage collection + is always run. To disable image garbage collection, set to 100. The default is 85%. :vartype image_gc_high_threshold: int - :ivar image_gc_low_threshold: This cannot be set higher than imageGcHighThreshold. The default - is 80%. + :ivar image_gc_low_threshold: The percent of disk usage before which image garbage collection + is never run. This cannot be set higher than imageGcHighThreshold. The default is 80%. :vartype image_gc_low_threshold: int - :ivar topology_manager_policy: For more information see `Kubernetes Topology Manager + :ivar topology_manager_policy: The Topology Manager policy to use. For more information see + `Kubernetes Topology Manager `_. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'. 
:vartype topology_manager_policy: str @@ -2122,7 +2455,7 @@ def __init__( image_gc_high_threshold: Optional[int] = None, image_gc_low_threshold: Optional[int] = None, topology_manager_policy: Optional[str] = None, - allowed_unsafe_sysctls: Optional[List[str]] = None, + allowed_unsafe_sysctls: Optional[list[str]] = None, fail_swap_on: Optional[bool] = None, container_log_max_size_mb: Optional[int] = None, container_log_max_files: Optional[int] = None, @@ -2130,23 +2463,27 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword cpu_manager_policy: The default is 'none'. See `Kubernetes CPU management policies + :keyword cpu_manager_policy: The CPU Manager policy to use. The default is 'none'. See + `Kubernetes CPU management policies `_ for more information. Allowed values are 'none' and 'static'. :paramtype cpu_manager_policy: str - :keyword cpu_cfs_quota: The default is true. + :keyword cpu_cfs_quota: If CPU CFS quota enforcement is enabled for containers that specify CPU + limits. The default is true. :paramtype cpu_cfs_quota: bool - :keyword cpu_cfs_quota_period: The default is '100ms.' Valid values are a sequence of decimal - numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported - units are 'ns', 'us', 'ms', 's', 'm', and 'h'. + :keyword cpu_cfs_quota_period: The CPU CFS quota period value. The default is '100ms.' Valid + values are a sequence of decimal numbers with an optional fraction and a unit suffix. For + example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'. :paramtype cpu_cfs_quota_period: str - :keyword image_gc_high_threshold: To disable image garbage collection, set to 100. The default - is 85%. + :keyword image_gc_high_threshold: The percent of disk usage after which image garbage + collection is always run. To disable image garbage collection, set to 100. The default is 85%. 
:paramtype image_gc_high_threshold: int - :keyword image_gc_low_threshold: This cannot be set higher than imageGcHighThreshold. The - default is 80%. + :keyword image_gc_low_threshold: The percent of disk usage before which image garbage + collection is never run. This cannot be set higher than imageGcHighThreshold. The default is + 80%. :paramtype image_gc_low_threshold: int - :keyword topology_manager_policy: For more information see `Kubernetes Topology Manager + :keyword topology_manager_policy: The Topology Manager policy to use. For more information see + `Kubernetes Topology Manager `_. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'. :paramtype topology_manager_policy: str @@ -2190,7 +2527,7 @@ class KubernetesPatchVersion(_serialization.Model): "upgrades": {"key": "upgrades", "type": "[str]"}, } - def __init__(self, *, upgrades: Optional[List[str]] = None, **kwargs: Any) -> None: + def __init__(self, *, upgrades: Optional[list[str]] = None, **kwargs: Any) -> None: """ :keyword upgrades: Possible upgrade path for given patch version. :paramtype upgrades: list[str] @@ -2205,15 +2542,13 @@ class KubernetesVersion(_serialization.Model): :ivar version: major.minor version of Kubernetes release. :vartype version: str :ivar capabilities: Capabilities on this Kubernetes version. - :vartype capabilities: - ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersionCapabilities + :vartype capabilities: ~azure.mgmt.containerservice.models.KubernetesVersionCapabilities :ivar is_default: Whether this version is default. :vartype is_default: bool :ivar is_preview: Whether this version is in preview mode. :vartype is_preview: bool :ivar patch_versions: Patch versions of Kubernetes release. 
- :vartype patch_versions: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesPatchVersion] + :vartype patch_versions: dict[str, ~azure.mgmt.containerservice.models.KubernetesPatchVersion] """ _attribute_map = { @@ -2231,22 +2566,21 @@ def __init__( capabilities: Optional["_models.KubernetesVersionCapabilities"] = None, is_default: Optional[bool] = None, is_preview: Optional[bool] = None, - patch_versions: Optional[Dict[str, "_models.KubernetesPatchVersion"]] = None, + patch_versions: Optional[dict[str, "_models.KubernetesPatchVersion"]] = None, **kwargs: Any ) -> None: """ :keyword version: major.minor version of Kubernetes release. :paramtype version: str :keyword capabilities: Capabilities on this Kubernetes version. - :paramtype capabilities: - ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersionCapabilities + :paramtype capabilities: ~azure.mgmt.containerservice.models.KubernetesVersionCapabilities :keyword is_default: Whether this version is default. :paramtype is_default: bool :keyword is_preview: Whether this version is in preview mode. :paramtype is_preview: bool :keyword patch_versions: Patch versions of Kubernetes release. :paramtype patch_versions: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesPatchVersion] + ~azure.mgmt.containerservice.models.KubernetesPatchVersion] """ super().__init__(**kwargs) self.version = version @@ -2260,8 +2594,7 @@ class KubernetesVersionCapabilities(_serialization.Model): """Capabilities on this Kubernetes version. 
:ivar support_plan: - :vartype support_plan: list[str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesSupportPlan] + :vartype support_plan: list[str or ~azure.mgmt.containerservice.models.KubernetesSupportPlan] """ _attribute_map = { @@ -2269,12 +2602,11 @@ class KubernetesVersionCapabilities(_serialization.Model): } def __init__( - self, *, support_plan: Optional[List[Union[str, "_models.KubernetesSupportPlan"]]] = None, **kwargs: Any + self, *, support_plan: Optional[list[Union[str, "_models.KubernetesSupportPlan"]]] = None, **kwargs: Any ) -> None: """ :keyword support_plan: - :paramtype support_plan: list[str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesSupportPlan] + :paramtype support_plan: list[str or ~azure.mgmt.containerservice.models.KubernetesSupportPlan] """ super().__init__(**kwargs) self.support_plan = support_plan @@ -2284,35 +2616,37 @@ class KubernetesVersionListResult(_serialization.Model): """Hold values properties, which is array of KubernetesVersion. :ivar values: Array of AKS supported Kubernetes versions. - :vartype values: list[~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersion] + :vartype values: list[~azure.mgmt.containerservice.models.KubernetesVersion] """ _attribute_map = { "values": {"key": "values", "type": "[KubernetesVersion]"}, } - def __init__(self, *, values: Optional[List["_models.KubernetesVersion"]] = None, **kwargs: Any) -> None: + def __init__(self, *, values: Optional[list["_models.KubernetesVersion"]] = None, **kwargs: Any) -> None: """ :keyword values: Array of AKS supported Kubernetes versions. - :paramtype values: list[~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersion] + :paramtype values: list[~azure.mgmt.containerservice.models.KubernetesVersion] """ super().__init__(**kwargs) self.values = values class LinuxOSConfig(_serialization.Model): - """See `AKS custom node configuration + """OS configurations of Linux agent nodes. 
See `AKS custom node configuration `_ for more details. :ivar sysctls: Sysctl settings for Linux agent nodes. - :vartype sysctls: ~azure.mgmt.containerservice.v2024_07_01.models.SysctlConfig - :ivar transparent_huge_page_enabled: Valid values are 'always', 'madvise', and 'never'. The - default is 'always'. For more information see `Transparent Hugepages + :vartype sysctls: ~azure.mgmt.containerservice.models.SysctlConfig + :ivar transparent_huge_page_enabled: Whether transparent hugepages are enabled. Valid values + are 'always', 'madvise', and 'never'. The default is 'always'. For more information see + `Transparent Hugepages `_. :vartype transparent_huge_page_enabled: str - :ivar transparent_huge_page_defrag: Valid values are 'always', 'defer', 'defer+madvise', - 'madvise' and 'never'. The default is 'madvise'. For more information see `Transparent - Hugepages + :ivar transparent_huge_page_defrag: Whether the kernel should make aggressive use of memory + compaction to make more hugepages available. Valid values are 'always', 'defer', + 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see + `Transparent Hugepages `_. :vartype transparent_huge_page_defrag: str :ivar swap_file_size_mb: The size in MB of a swap file that will be created on each node. @@ -2337,14 +2671,16 @@ def __init__( ) -> None: """ :keyword sysctls: Sysctl settings for Linux agent nodes. - :paramtype sysctls: ~azure.mgmt.containerservice.v2024_07_01.models.SysctlConfig - :keyword transparent_huge_page_enabled: Valid values are 'always', 'madvise', and 'never'. The - default is 'always'. For more information see `Transparent Hugepages + :paramtype sysctls: ~azure.mgmt.containerservice.models.SysctlConfig + :keyword transparent_huge_page_enabled: Whether transparent hugepages are enabled. Valid values + are 'always', 'madvise', and 'never'. The default is 'always'. For more information see + `Transparent Hugepages `_. 
:paramtype transparent_huge_page_enabled: str - :keyword transparent_huge_page_defrag: Valid values are 'always', 'defer', 'defer+madvise', - 'madvise' and 'never'. The default is 'madvise'. For more information see `Transparent - Hugepages + :keyword transparent_huge_page_defrag: Whether the kernel should make aggressive use of memory + compaction to make more hugepages available. Valid values are 'always', 'defer', + 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see + `Transparent Hugepages `_. :paramtype transparent_huge_page_defrag: str :keyword swap_file_size_mb: The size in MB of a swap file that will be created on each node. @@ -2357,6 +2693,159 @@ def __init__( self.swap_file_size_mb = swap_file_size_mb +class LocalDNSOverride(_serialization.Model): + """Overrides for localDNS profile. + + :ivar query_logging: Log level for DNS queries in localDNS. Known values are: "Error" and + "Log". + :vartype query_logging: str or ~azure.mgmt.containerservice.models.LocalDNSQueryLogging + :ivar protocol: Enforce TCP or prefer UDP protocol for connections from localDNS to upstream + DNS server. Known values are: "PreferUDP" and "ForceTCP". + :vartype protocol: str or ~azure.mgmt.containerservice.models.LocalDNSProtocol + :ivar forward_destination: Destination server for DNS queries to be forwarded from localDNS. + Known values are: "ClusterCoreDNS" and "VnetDNS". + :vartype forward_destination: str or + ~azure.mgmt.containerservice.models.LocalDNSForwardDestination + :ivar forward_policy: Forward policy for selecting upstream DNS server. See `forward plugin + `_ for more information. Known values are: "Sequential", + "RoundRobin", and "Random". + :vartype forward_policy: str or ~azure.mgmt.containerservice.models.LocalDNSForwardPolicy + :ivar max_concurrent: Maximum number of concurrent queries. See `forward plugin + `_ for more information. 
+ :vartype max_concurrent: int + :ivar cache_duration_in_seconds: Cache max TTL in seconds. See `cache plugin + `_ for more information. + :vartype cache_duration_in_seconds: int + :ivar serve_stale_duration_in_seconds: Serve stale duration in seconds. See `cache plugin + `_ for more information. + :vartype serve_stale_duration_in_seconds: int + :ivar serve_stale: Policy for serving stale data. See `cache plugin + `_ for more information. Known values are: "Verify", + "Immediate", and "Disable". + :vartype serve_stale: str or ~azure.mgmt.containerservice.models.LocalDNSServeStale + """ + + _attribute_map = { + "query_logging": {"key": "queryLogging", "type": "str"}, + "protocol": {"key": "protocol", "type": "str"}, + "forward_destination": {"key": "forwardDestination", "type": "str"}, + "forward_policy": {"key": "forwardPolicy", "type": "str"}, + "max_concurrent": {"key": "maxConcurrent", "type": "int"}, + "cache_duration_in_seconds": {"key": "cacheDurationInSeconds", "type": "int"}, + "serve_stale_duration_in_seconds": {"key": "serveStaleDurationInSeconds", "type": "int"}, + "serve_stale": {"key": "serveStale", "type": "str"}, + } + + def __init__( + self, + *, + query_logging: Union[str, "_models.LocalDNSQueryLogging"] = "Error", + protocol: Union[str, "_models.LocalDNSProtocol"] = "PreferUDP", + forward_destination: Union[str, "_models.LocalDNSForwardDestination"] = "ClusterCoreDNS", + forward_policy: Union[str, "_models.LocalDNSForwardPolicy"] = "Sequential", + max_concurrent: int = 1000, + cache_duration_in_seconds: int = 3600, + serve_stale_duration_in_seconds: int = 3600, + serve_stale: Union[str, "_models.LocalDNSServeStale"] = "Immediate", + **kwargs: Any + ) -> None: + """ + :keyword query_logging: Log level for DNS queries in localDNS. Known values are: "Error" and + "Log". 
+ :paramtype query_logging: str or ~azure.mgmt.containerservice.models.LocalDNSQueryLogging + :keyword protocol: Enforce TCP or prefer UDP protocol for connections from localDNS to upstream + DNS server. Known values are: "PreferUDP" and "ForceTCP". + :paramtype protocol: str or ~azure.mgmt.containerservice.models.LocalDNSProtocol + :keyword forward_destination: Destination server for DNS queries to be forwarded from localDNS. + Known values are: "ClusterCoreDNS" and "VnetDNS". + :paramtype forward_destination: str or + ~azure.mgmt.containerservice.models.LocalDNSForwardDestination + :keyword forward_policy: Forward policy for selecting upstream DNS server. See `forward plugin + `_ for more information. Known values are: "Sequential", + "RoundRobin", and "Random". + :paramtype forward_policy: str or ~azure.mgmt.containerservice.models.LocalDNSForwardPolicy + :keyword max_concurrent: Maximum number of concurrent queries. See `forward plugin + `_ for more information. + :paramtype max_concurrent: int + :keyword cache_duration_in_seconds: Cache max TTL in seconds. See `cache plugin + `_ for more information. + :paramtype cache_duration_in_seconds: int + :keyword serve_stale_duration_in_seconds: Serve stale duration in seconds. See `cache plugin + `_ for more information. + :paramtype serve_stale_duration_in_seconds: int + :keyword serve_stale: Policy for serving stale data. See `cache plugin + `_ for more information. Known values are: "Verify", + "Immediate", and "Disable". 
+ :paramtype serve_stale: str or ~azure.mgmt.containerservice.models.LocalDNSServeStale + """ + super().__init__(**kwargs) + self.query_logging = query_logging + self.protocol = protocol + self.forward_destination = forward_destination + self.forward_policy = forward_policy + self.max_concurrent = max_concurrent + self.cache_duration_in_seconds = cache_duration_in_seconds + self.serve_stale_duration_in_seconds = serve_stale_duration_in_seconds + self.serve_stale = serve_stale + + +class LocalDNSProfile(_serialization.Model): + """Configures the per-node local DNS, with VnetDNS and KubeDNS overrides. LocalDNS helps improve + performance and reliability of DNS resolution in an AKS cluster. For more details see + aka.ms/aks/localdns. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar mode: Mode of enablement for localDNS. Known values are: "Preferred", "Required", and + "Disabled". + :vartype mode: str or ~azure.mgmt.containerservice.models.LocalDNSMode + :ivar state: System-generated state of localDNS. Known values are: "Enabled" and "Disabled". + :vartype state: str or ~azure.mgmt.containerservice.models.LocalDNSState + :ivar vnet_dns_overrides: VnetDNS overrides apply to DNS traffic from pods with + dnsPolicy:default or kubelet (referred to as VnetDNS traffic). + :vartype vnet_dns_overrides: dict[str, ~azure.mgmt.containerservice.models.LocalDNSOverride] + :ivar kube_dns_overrides: KubeDNS overrides apply to DNS traffic from pods with + dnsPolicy:ClusterFirst (referred to as KubeDNS traffic). 
+ :vartype kube_dns_overrides: dict[str, ~azure.mgmt.containerservice.models.LocalDNSOverride] + """ + + _validation = { + "state": {"readonly": True}, + } + + _attribute_map = { + "mode": {"key": "mode", "type": "str"}, + "state": {"key": "state", "type": "str"}, + "vnet_dns_overrides": {"key": "vnetDNSOverrides", "type": "{LocalDNSOverride}"}, + "kube_dns_overrides": {"key": "kubeDNSOverrides", "type": "{LocalDNSOverride}"}, + } + + def __init__( + self, + *, + mode: Union[str, "_models.LocalDNSMode"] = "Preferred", + vnet_dns_overrides: Optional[dict[str, "_models.LocalDNSOverride"]] = None, + kube_dns_overrides: Optional[dict[str, "_models.LocalDNSOverride"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword mode: Mode of enablement for localDNS. Known values are: "Preferred", "Required", and + "Disabled". + :paramtype mode: str or ~azure.mgmt.containerservice.models.LocalDNSMode + :keyword vnet_dns_overrides: VnetDNS overrides apply to DNS traffic from pods with + dnsPolicy:default or kubelet (referred to as VnetDNS traffic). + :paramtype vnet_dns_overrides: dict[str, ~azure.mgmt.containerservice.models.LocalDNSOverride] + :keyword kube_dns_overrides: KubeDNS overrides apply to DNS traffic from pods with + dnsPolicy:ClusterFirst (referred to as KubeDNS traffic). + :paramtype kube_dns_overrides: dict[str, ~azure.mgmt.containerservice.models.LocalDNSOverride] + """ + super().__init__(**kwargs) + self.mode = mode + self.state: Optional[Union[str, "_models.LocalDNSState"]] = None + self.vnet_dns_overrides = vnet_dns_overrides + self.kube_dns_overrides = kube_dns_overrides + + class Machine(SubResource): """A machine. Contains details about the underlying virtual machine. A machine may be visible here but not in kubectl get nodes; if so it may be because the machine has not been registered with @@ -2371,14 +2860,17 @@ class Machine(SubResource): :vartype name: str :ivar type: Resource type. 
:vartype type: str + :ivar zones: The Availability zone in which machine is located. + :vartype zones: list[str] :ivar properties: The properties of the machine. - :vartype properties: ~azure.mgmt.containerservice.v2024_07_01.models.MachineProperties + :vartype properties: ~azure.mgmt.containerservice.models.MachineProperties """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, + "zones": {"readonly": True}, "properties": {"readonly": True}, } @@ -2386,13 +2878,15 @@ class Machine(SubResource): "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, + "zones": {"key": "zones", "type": "[str]"}, "properties": {"key": "properties", "type": "MachineProperties"}, } def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.properties = None + self.zones: Optional[list[str]] = None + self.properties: Optional["_models.MachineProperties"] = None class MachineIpAddress(_serialization.Model): @@ -2402,7 +2896,7 @@ class MachineIpAddress(_serialization.Model): :ivar family: To determine if address belongs IPv4 or IPv6 family. Known values are: "IPv4" and "IPv6". - :vartype family: str or ~azure.mgmt.containerservice.v2024_07_01.models.IpFamily + :vartype family: str or ~azure.mgmt.containerservice.models.IpFamily :ivar ip: IPv4 or IPv6 address of the machine. :vartype ip: str """ @@ -2420,8 +2914,8 @@ class MachineIpAddress(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.family = None - self.ip = None + self.family: Optional[Union[str, "_models.IpFamily"]] = None + self.ip: Optional[str] = None class MachineListResult(_serialization.Model): @@ -2432,7 +2926,7 @@ class MachineListResult(_serialization.Model): :ivar next_link: The URL to get the next set of machine results. :vartype next_link: str :ivar value: The list of Machines in cluster. 
- :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.Machine] + :vartype value: list[~azure.mgmt.containerservice.models.Machine] """ _validation = { @@ -2444,13 +2938,13 @@ class MachineListResult(_serialization.Model): "value": {"key": "value", "type": "[Machine]"}, } - def __init__(self, *, value: Optional[List["_models.Machine"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.Machine"]] = None, **kwargs: Any) -> None: """ :keyword value: The list of Machines in cluster. - :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.Machine] + :paramtype value: list[~azure.mgmt.containerservice.models.Machine] """ super().__init__(**kwargs) - self.next_link = None + self.next_link: Optional[str] = None self.value = value @@ -2460,7 +2954,7 @@ class MachineNetworkProperties(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar ip_addresses: IPv4, IPv6 addresses of the machine. - :vartype ip_addresses: list[~azure.mgmt.containerservice.v2024_07_01.models.MachineIpAddress] + :vartype ip_addresses: list[~azure.mgmt.containerservice.models.MachineIpAddress] """ _validation = { @@ -2474,7 +2968,7 @@ class MachineNetworkProperties(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.ip_addresses = None + self.ip_addresses: Optional[list["_models.MachineIpAddress"]] = None class MachineProperties(_serialization.Model): @@ -2483,7 +2977,7 @@ class MachineProperties(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar network: network properties of the machine. - :vartype network: ~azure.mgmt.containerservice.v2024_07_01.models.MachineNetworkProperties + :vartype network: ~azure.mgmt.containerservice.models.MachineNetworkProperties :ivar resource_id: Azure resource id of the machine. 
It can be used to GET underlying VM Instance. :vartype resource_id: str @@ -2502,13 +2996,14 @@ class MachineProperties(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.network = None - self.resource_id = None + self.network: Optional["_models.MachineNetworkProperties"] = None + self.resource_id: Optional[str] = None class MaintenanceConfiguration(SubResource): - """See `planned maintenance `_ for more - information about planned maintenance. + """Planned maintenance configuration, used to configure when updates can be deployed to a Managed + Cluster. See `planned maintenance `_ + for more information about planned maintenance. Variables are only populated by the server, and will be ignored when sending a request. @@ -2520,14 +3015,15 @@ class MaintenanceConfiguration(SubResource): :ivar type: Resource type. :vartype type: str :ivar system_data: The system metadata relating to this resource. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData - :ivar time_in_week: If two array entries specify the same day of the week, the applied - configuration is the union of times in both entries. - :vartype time_in_week: list[~azure.mgmt.containerservice.v2024_07_01.models.TimeInWeek] + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData + :ivar time_in_week: Time slots during the week when planned maintenance is allowed to proceed. + If two array entries specify the same day of the week, the applied configuration is the union + of times in both entries. + :vartype time_in_week: list[~azure.mgmt.containerservice.models.TimeInWeek] :ivar not_allowed_time: Time slots on which upgrade is not allowed. - :vartype not_allowed_time: list[~azure.mgmt.containerservice.v2024_07_01.models.TimeSpan] + :vartype not_allowed_time: list[~azure.mgmt.containerservice.models.TimeSpan] :ivar maintenance_window: Maintenance window for the maintenance configuration. 
- :vartype maintenance_window: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceWindow + :vartype maintenance_window: ~azure.mgmt.containerservice.models.MaintenanceWindow """ _validation = { @@ -2550,23 +3046,23 @@ class MaintenanceConfiguration(SubResource): def __init__( self, *, - time_in_week: Optional[List["_models.TimeInWeek"]] = None, - not_allowed_time: Optional[List["_models.TimeSpan"]] = None, + time_in_week: Optional[list["_models.TimeInWeek"]] = None, + not_allowed_time: Optional[list["_models.TimeSpan"]] = None, maintenance_window: Optional["_models.MaintenanceWindow"] = None, **kwargs: Any ) -> None: """ - :keyword time_in_week: If two array entries specify the same day of the week, the applied - configuration is the union of times in both entries. - :paramtype time_in_week: list[~azure.mgmt.containerservice.v2024_07_01.models.TimeInWeek] + :keyword time_in_week: Time slots during the week when planned maintenance is allowed to + proceed. If two array entries specify the same day of the week, the applied configuration is + the union of times in both entries. + :paramtype time_in_week: list[~azure.mgmt.containerservice.models.TimeInWeek] :keyword not_allowed_time: Time slots on which upgrade is not allowed. - :paramtype not_allowed_time: list[~azure.mgmt.containerservice.v2024_07_01.models.TimeSpan] + :paramtype not_allowed_time: list[~azure.mgmt.containerservice.models.TimeSpan] :keyword maintenance_window: Maintenance window for the maintenance configuration. 
- :paramtype maintenance_window: - ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceWindow + :paramtype maintenance_window: ~azure.mgmt.containerservice.models.MaintenanceWindow """ super().__init__(**kwargs) - self.system_data = None + self.system_data: Optional["_models.SystemData"] = None self.time_in_week = time_in_week self.not_allowed_time = not_allowed_time self.maintenance_window = maintenance_window @@ -2578,7 +3074,7 @@ class MaintenanceConfigurationListResult(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of maintenance configurations. - :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration] + :vartype value: list[~azure.mgmt.containerservice.models.MaintenanceConfiguration] :ivar next_link: The URL to get the next set of maintenance configuration results. :vartype next_link: str """ @@ -2592,15 +3088,14 @@ class MaintenanceConfigurationListResult(_serialization.Model): "next_link": {"key": "nextLink", "type": "str"}, } - def __init__(self, *, value: Optional[List["_models.MaintenanceConfiguration"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.MaintenanceConfiguration"]] = None, **kwargs: Any) -> None: """ :keyword value: The list of maintenance configurations. - :paramtype value: - list[~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration] + :paramtype value: list[~azure.mgmt.containerservice.models.MaintenanceConfiguration] """ super().__init__(**kwargs) self.value = value - self.next_link = None + self.next_link: Optional[str] = None class MaintenanceWindow(_serialization.Model): @@ -2609,7 +3104,7 @@ class MaintenanceWindow(_serialization.Model): All required parameters must be populated in order to send to server. :ivar schedule: Recurrence schedule for the maintenance window. Required. 
- :vartype schedule: ~azure.mgmt.containerservice.v2024_07_01.models.Schedule + :vartype schedule: ~azure.mgmt.containerservice.models.Schedule :ivar duration_hours: Length of maintenance window range from 4 to 24 hours. :vartype duration_hours: int :ivar utc_offset: The UTC offset in format +/-HH:mm. For example, '+05:30' for IST and '-07:00' @@ -2627,7 +3122,7 @@ class MaintenanceWindow(_serialization.Model): this field. For example, with 'utcOffset: +02:00' and 'dateSpan' being '2022-12-23' to '2023-01-03', maintenance will be blocked from '2022-12-22 22:00' to '2023-01-03 22:00' in UTC time. - :vartype not_allowed_dates: list[~azure.mgmt.containerservice.v2024_07_01.models.DateSpan] + :vartype not_allowed_dates: list[~azure.mgmt.containerservice.models.DateSpan] """ _validation = { @@ -2654,12 +3149,12 @@ def __init__( start_time: str, utc_offset: Optional[str] = None, start_date: Optional[datetime.date] = None, - not_allowed_dates: Optional[List["_models.DateSpan"]] = None, + not_allowed_dates: Optional[list["_models.DateSpan"]] = None, **kwargs: Any ) -> None: """ :keyword schedule: Recurrence schedule for the maintenance window. Required. - :paramtype schedule: ~azure.mgmt.containerservice.v2024_07_01.models.Schedule + :paramtype schedule: ~azure.mgmt.containerservice.models.Schedule :keyword duration_hours: Length of maintenance window range from 4 to 24 hours. :paramtype duration_hours: int :keyword utc_offset: The UTC offset in format +/-HH:mm. For example, '+05:30' for IST and @@ -2677,7 +3172,7 @@ def __init__( this field. For example, with 'utcOffset: +02:00' and 'dateSpan' being '2022-12-23' to '2023-01-03', maintenance will be blocked from '2022-12-22 22:00' to '2023-01-03 22:00' in UTC time. 
- :paramtype not_allowed_dates: list[~azure.mgmt.containerservice.v2024_07_01.models.DateSpan] + :paramtype not_allowed_dates: list[~azure.mgmt.containerservice.models.DateSpan] """ super().__init__(**kwargs) self.schedule = schedule @@ -2694,7 +3189,7 @@ class Resource(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. E.g. - "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". :vartype id: str :ivar name: The name of the resource. :vartype name: str @@ -2703,7 +3198,7 @@ class Resource(_serialization.Model): :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData """ _validation = { @@ -2723,10 +3218,10 @@ class Resource(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.id = None - self.name = None - self.type = None - self.system_data = None + self.id: Optional[str] = None + self.name: Optional[str] = None + self.type: Optional[str] = None + self.system_data: Optional["_models.SystemData"] = None class TrackedResource(Resource): @@ -2738,7 +3233,7 @@ class TrackedResource(Resource): All required parameters must be populated in order to send to server. :ivar id: Fully qualified resource ID for the resource. E.g. - "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". 
# pylint: disable=line-too-long + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". :vartype id: str :ivar name: The name of the resource. :vartype name: str @@ -2747,7 +3242,7 @@ class TrackedResource(Resource): :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData :ivar tags: Resource tags. :vartype tags: dict[str, str] :ivar location: The geo-location where the resource lives. Required. @@ -2771,7 +3266,7 @@ class TrackedResource(Resource): "location": {"key": "location", "type": "str"}, } - def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None: + def __init__(self, *, location: str, tags: Optional[dict[str, str]] = None, **kwargs: Any) -> None: """ :keyword tags: Resource tags. :paramtype tags: dict[str, str] @@ -2783,7 +3278,7 @@ def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kw self.location = location -class ManagedCluster(TrackedResource): # pylint: disable=too-many-instance-attributes +class ManagedCluster(TrackedResource): """Managed cluster. Variables are only populated by the server, and will be ignored when sending a request. @@ -2791,7 +3286,7 @@ class ManagedCluster(TrackedResource): # pylint: disable=too-many-instance-attr All required parameters must be populated in order to send to server. :ivar id: Fully qualified resource ID for the resource. E.g. - "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". 
:vartype id: str :ivar name: The name of the resource. :vartype name: str @@ -2800,147 +3295,160 @@ class ManagedCluster(TrackedResource): # pylint: disable=too-many-instance-attr :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData :ivar tags: Resource tags. :vartype tags: dict[str, str] :ivar location: The geo-location where the resource lives. Required. :vartype location: str + :ivar e_tag: Unique read-only string used to implement optimistic concurrency. The eTag value + will change when the resource is updated. Specify an if-match or if-none-match header with the + eTag value for a subsequent request to enable optimistic concurrency per the normal eTag + convention. + :vartype e_tag: str :ivar sku: The managed cluster SKU. - :vartype sku: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKU + :vartype sku: ~azure.mgmt.containerservice.models.ManagedClusterSKU :ivar extended_location: The extended location of the Virtual Machine. - :vartype extended_location: ~azure.mgmt.containerservice.v2024_07_01.models.ExtendedLocation + :vartype extended_location: ~azure.mgmt.containerservice.models.ExtendedLocation :ivar identity: The identity of the managed cluster, if configured. - :vartype identity: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIdentity + :vartype identity: ~azure.mgmt.containerservice.models.ManagedClusterIdentity + :ivar kind: This is primarily used to expose different UI experiences in the portal for + different kinds. + :vartype kind: str :ivar provisioning_state: The current provisioning state. :vartype provisioning_state: str :ivar power_state: The Power State of the cluster. 
- :vartype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :vartype power_state: ~azure.mgmt.containerservice.models.PowerState :ivar max_agent_pools: The max number of agent pools for the managed cluster. :vartype max_agent_pools: int - :ivar kubernetes_version: Both patch version (e.g. 1.20.13) and - (e.g. 1.20) are supported. When is specified, the latest supported - GA patch version is chosen automatically. Updating the cluster with the same once - it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch - version is available. When you upgrade a supported AKS cluster, Kubernetes minor versions - cannot be skipped. All upgrades must be performed sequentially by major version number. For - example, upgrades between 1.14.x -> 1.15.x or 1.15.x -> 1.16.x are allowed, however 1.14.x -> - 1.16.x is not allowed. See `upgrading an AKS cluster - `_ for more details. + :ivar kubernetes_version: The version of Kubernetes specified by the user. Both patch version + (e.g. 1.20.13) and (e.g. 1.20) are supported. When + is specified, the latest supported GA patch version is chosen automatically. + Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14) + will not trigger an upgrade, even if a newer patch version is available. When you upgrade a + supported AKS cluster, Kubernetes minor versions cannot be skipped. All upgrades must be + performed sequentially by major version number. For example, upgrades between 1.14.x -> 1.15.x + or 1.15.x -> 1.16.x are allowed, however 1.14.x -> 1.16.x is not allowed. See `upgrading an AKS + cluster `_ for more details. :vartype kubernetes_version: str - :ivar current_kubernetes_version: If kubernetesVersion was a fully specified version - , this field will be exactly equal to it. If kubernetesVersion was - , this field will contain the full version being used. + :ivar current_kubernetes_version: The version of Kubernetes the Managed Cluster is running. 
If + kubernetesVersion was a fully specified version , this field will be exactly + equal to it. If kubernetesVersion was , this field will contain the full + version being used. :vartype current_kubernetes_version: str - :ivar dns_prefix: This cannot be updated once the Managed Cluster has been created. + :ivar dns_prefix: The DNS prefix of the Managed Cluster. This cannot be updated once the + Managed Cluster has been created. :vartype dns_prefix: str - :ivar fqdn_subdomain: This cannot be updated once the Managed Cluster has been created. + :ivar fqdn_subdomain: The FQDN subdomain of the private cluster with custom private dns zone. + This cannot be updated once the Managed Cluster has been created. :vartype fqdn_subdomain: str :ivar fqdn: The FQDN of the master pool. :vartype fqdn: str :ivar private_fqdn: The FQDN of private cluster. :vartype private_fqdn: str - :ivar azure_portal_fqdn: The Azure Portal requires certain Cross-Origin Resource Sharing (CORS) - headers to be sent in some responses, which Kubernetes APIServer doesn't handle by default. - This special FQDN supports CORS, allowing the Azure Portal to function properly. + :ivar azure_portal_fqdn: The special FQDN used by the Azure Portal to access the Managed + Cluster. This FQDN is for use only by the Azure Portal and should not be used by other clients. + The Azure Portal requires certain Cross-Origin Resource Sharing (CORS) headers to be sent in + some responses, which Kubernetes APIServer doesn't handle by default. This special FQDN + supports CORS, allowing the Azure Portal to function properly. :vartype azure_portal_fqdn: str :ivar agent_pool_profiles: The agent pool properties. :vartype agent_pool_profiles: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAgentPoolProfile] + list[~azure.mgmt.containerservice.models.ManagedClusterAgentPoolProfile] :ivar linux_profile: The profile for Linux VMs in the Managed Cluster. 
- :vartype linux_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceLinuxProfile + :vartype linux_profile: ~azure.mgmt.containerservice.models.ContainerServiceLinuxProfile :ivar windows_profile: The profile for Windows VMs in the Managed Cluster. - :vartype windows_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWindowsProfile + :vartype windows_profile: ~azure.mgmt.containerservice.models.ManagedClusterWindowsProfile :ivar service_principal_profile: Information about a service principal identity for the cluster to use for manipulating Azure APIs. :vartype service_principal_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile + ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile :ivar addon_profiles: The profile of managed cluster add-on. :vartype addon_profiles: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAddonProfile] - :ivar pod_identity_profile: See `use AAD pod identity - `_ for more details on AAD pod - identity integration. + ~azure.mgmt.containerservice.models.ManagedClusterAddonProfile] + :ivar pod_identity_profile: The pod identity profile of the Managed Cluster. See `use AAD pod + identity `_ for more details on + AAD pod identity integration. :vartype pod_identity_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProfile + ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProfile :ivar oidc_issuer_profile: The OIDC issuer profile of the Managed Cluster. :vartype oidc_issuer_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterOIDCIssuerProfile + ~azure.mgmt.containerservice.models.ManagedClusterOIDCIssuerProfile :ivar node_resource_group: The name of the resource group containing agent pool nodes. :vartype node_resource_group: str + :ivar node_resource_group_profile: Profile of the node resource group configuration. 
+ :vartype node_resource_group_profile: + ~azure.mgmt.containerservice.models.ManagedClusterNodeResourceGroupProfile :ivar enable_rbac: Whether to enable Kubernetes Role-Based Access Control. :vartype enable_rbac: bool :ivar support_plan: The support plan for the Managed Cluster. If unspecified, the default is 'KubernetesOfficial'. Known values are: "KubernetesOfficial" and "AKSLongTermSupport". - :vartype support_plan: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesSupportPlan - :ivar enable_pod_security_policy: (DEPRECATED) Whether to enable Kubernetes pod security policy - (preview). PodSecurityPolicy was deprecated in Kubernetes v1.21, and removed from Kubernetes in - v1.25. Learn more at https://aka.ms/k8s/psp and https://aka.ms/aks/psp. - :vartype enable_pod_security_policy: bool + :vartype support_plan: str or ~azure.mgmt.containerservice.models.KubernetesSupportPlan :ivar network_profile: The network configuration profile. - :vartype network_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceNetworkProfile + :vartype network_profile: ~azure.mgmt.containerservice.models.ContainerServiceNetworkProfile :ivar aad_profile: The Azure Active Directory configuration. - :vartype aad_profile: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile + :vartype aad_profile: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile :ivar auto_upgrade_profile: The auto upgrade configuration. :vartype auto_upgrade_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAutoUpgradeProfile + ~azure.mgmt.containerservice.models.ManagedClusterAutoUpgradeProfile :ivar upgrade_settings: Settings for upgrading a cluster. - :vartype upgrade_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.ClusterUpgradeSettings + :vartype upgrade_settings: ~azure.mgmt.containerservice.models.ClusterUpgradeSettings :ivar auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled. 
:vartype auto_scaler_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPropertiesAutoScalerProfile + ~azure.mgmt.containerservice.models.ManagedClusterPropertiesAutoScalerProfile :ivar api_server_access_profile: The access profile for managed cluster API server. :vartype api_server_access_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAPIServerAccessProfile - :ivar disk_encryption_set_id: This is of the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{encryptionSetName}'. # pylint: disable=line-too-long + ~azure.mgmt.containerservice.models.ManagedClusterAPIServerAccessProfile + :ivar disk_encryption_set_id: The Resource ID of the disk encryption set to use for enabling + encryption at rest. This is of the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{encryptionSetName}'. :vartype disk_encryption_set_id: str - :ivar identity_profile: Identities associated with the cluster. - :vartype identity_profile: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity] + :ivar identity_profile: The user identity associated with the managed cluster. This identity + will be used by the kubelet. Only one user assigned identity is allowed. The only accepted key + is "kubeletidentity", with value of "resourceId": + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}". + :vartype identity_profile: dict[str, ~azure.mgmt.containerservice.models.UserAssignedIdentity] :ivar private_link_resources: Private link resources associated with the cluster. - :vartype private_link_resources: - list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource] - :ivar disable_local_accounts: If set to true, getting static credentials will be disabled for - this cluster. 
This must only be used on Managed Clusters that are AAD enabled. For more details - see `disable local accounts + :vartype private_link_resources: list[~azure.mgmt.containerservice.models.PrivateLinkResource] + :ivar disable_local_accounts: If local accounts should be disabled on the Managed Cluster. If + set to true, getting static credentials will be disabled for this cluster. This must only be + used on Managed Clusters that are AAD enabled. For more details see `disable local accounts `_. :vartype disable_local_accounts: bool :ivar http_proxy_config: Configurations for provisioning the cluster with HTTP proxy servers. - :vartype http_proxy_config: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterHTTPProxyConfig + :vartype http_proxy_config: ~azure.mgmt.containerservice.models.ManagedClusterHTTPProxyConfig :ivar security_profile: Security profile for the managed cluster. - :vartype security_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfile + :vartype security_profile: ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfile :ivar storage_profile: Storage profile for the managed cluster. - :vartype storage_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfile + :vartype storage_profile: ~azure.mgmt.containerservice.models.ManagedClusterStorageProfile :ivar ingress_profile: Ingress profile for the managed cluster. - :vartype ingress_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIngressProfile - :ivar public_network_access: Allow or deny public network access for AKS. Known values are: - "Enabled" and "Disabled". - :vartype public_network_access: str or - ~azure.mgmt.containerservice.v2024_07_01.models.PublicNetworkAccess + :vartype ingress_profile: ~azure.mgmt.containerservice.models.ManagedClusterIngressProfile + :ivar public_network_access: PublicNetworkAccess of the managedCluster. Allow or deny public + network access for AKS. 
Known values are: "Enabled" and "Disabled". + :vartype public_network_access: str or ~azure.mgmt.containerservice.models.PublicNetworkAccess :ivar workload_auto_scaler_profile: Workload Auto-scaler profile for the managed cluster. :vartype workload_auto_scaler_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfile + ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfile :ivar azure_monitor_profile: Azure Monitor addon profiles for monitoring the managed cluster. :vartype azure_monitor_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfile + ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfile :ivar service_mesh_profile: Service mesh profile for a managed cluster. - :vartype service_mesh_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ServiceMeshProfile + :vartype service_mesh_profile: ~azure.mgmt.containerservice.models.ServiceMeshProfile :ivar resource_uid: The resourceUID uniquely identifies ManagedClusters that reuse ARM ResourceIds (i.e: create, delete, create sequence). :vartype resource_uid: str :ivar metrics_profile: Optional cluster metrics configuration. - :vartype metrics_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterMetricsProfile + :vartype metrics_profile: ~azure.mgmt.containerservice.models.ManagedClusterMetricsProfile + :ivar node_provisioning_profile: Node provisioning settings that apply to the whole cluster. + :vartype node_provisioning_profile: + ~azure.mgmt.containerservice.models.ManagedClusterNodeProvisioningProfile + :ivar bootstrap_profile: Profile of the cluster bootstrap configuration. + :vartype bootstrap_profile: ~azure.mgmt.containerservice.models.ManagedClusterBootstrapProfile + :ivar ai_toolchain_operator_profile: AI toolchain operator settings that apply to the whole + cluster. 
+ :vartype ai_toolchain_operator_profile: + ~azure.mgmt.containerservice.models.ManagedClusterAIToolchainOperatorProfile + :ivar status: Contains read-only information about the Managed Cluster. + :vartype status: ~azure.mgmt.containerservice.models.ManagedClusterStatus """ _validation = { @@ -2949,6 +3457,7 @@ class ManagedCluster(TrackedResource): # pylint: disable=too-many-instance-attr "type": {"readonly": True}, "system_data": {"readonly": True}, "location": {"required": True}, + "e_tag": {"readonly": True}, "provisioning_state": {"readonly": True}, "power_state": {"readonly": True}, "max_agent_pools": {"readonly": True}, @@ -2966,9 +3475,11 @@ class ManagedCluster(TrackedResource): # pylint: disable=too-many-instance-attr "system_data": {"key": "systemData", "type": "SystemData"}, "tags": {"key": "tags", "type": "{str}"}, "location": {"key": "location", "type": "str"}, + "e_tag": {"key": "eTag", "type": "str"}, "sku": {"key": "sku", "type": "ManagedClusterSKU"}, "extended_location": {"key": "extendedLocation", "type": "ExtendedLocation"}, "identity": {"key": "identity", "type": "ManagedClusterIdentity"}, + "kind": {"key": "kind", "type": "str"}, "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, "power_state": {"key": "properties.powerState", "type": "PowerState"}, "max_agent_pools": {"key": "properties.maxAgentPools", "type": "int"}, @@ -2990,9 +3501,12 @@ class ManagedCluster(TrackedResource): # pylint: disable=too-many-instance-attr "pod_identity_profile": {"key": "properties.podIdentityProfile", "type": "ManagedClusterPodIdentityProfile"}, "oidc_issuer_profile": {"key": "properties.oidcIssuerProfile", "type": "ManagedClusterOIDCIssuerProfile"}, "node_resource_group": {"key": "properties.nodeResourceGroup", "type": "str"}, + "node_resource_group_profile": { + "key": "properties.nodeResourceGroupProfile", + "type": "ManagedClusterNodeResourceGroupProfile", + }, "enable_rbac": {"key": "properties.enableRBAC", "type": "bool"}, 
"support_plan": {"key": "properties.supportPlan", "type": "str"}, - "enable_pod_security_policy": {"key": "properties.enablePodSecurityPolicy", "type": "bool"}, "network_profile": {"key": "properties.networkProfile", "type": "ContainerServiceNetworkProfile"}, "aad_profile": {"key": "properties.aadProfile", "type": "ManagedClusterAADProfile"}, "auto_upgrade_profile": {"key": "properties.autoUpgradeProfile", "type": "ManagedClusterAutoUpgradeProfile"}, @@ -3022,30 +3536,41 @@ class ManagedCluster(TrackedResource): # pylint: disable=too-many-instance-attr "service_mesh_profile": {"key": "properties.serviceMeshProfile", "type": "ServiceMeshProfile"}, "resource_uid": {"key": "properties.resourceUID", "type": "str"}, "metrics_profile": {"key": "properties.metricsProfile", "type": "ManagedClusterMetricsProfile"}, + "node_provisioning_profile": { + "key": "properties.nodeProvisioningProfile", + "type": "ManagedClusterNodeProvisioningProfile", + }, + "bootstrap_profile": {"key": "properties.bootstrapProfile", "type": "ManagedClusterBootstrapProfile"}, + "ai_toolchain_operator_profile": { + "key": "properties.aiToolchainOperatorProfile", + "type": "ManagedClusterAIToolchainOperatorProfile", + }, + "status": {"key": "properties.status", "type": "ManagedClusterStatus"}, } def __init__( # pylint: disable=too-many-locals self, *, location: str, - tags: Optional[Dict[str, str]] = None, + tags: Optional[dict[str, str]] = None, sku: Optional["_models.ManagedClusterSKU"] = None, extended_location: Optional["_models.ExtendedLocation"] = None, identity: Optional["_models.ManagedClusterIdentity"] = None, + kind: Optional[str] = None, kubernetes_version: Optional[str] = None, dns_prefix: Optional[str] = None, fqdn_subdomain: Optional[str] = None, - agent_pool_profiles: Optional[List["_models.ManagedClusterAgentPoolProfile"]] = None, + agent_pool_profiles: Optional[list["_models.ManagedClusterAgentPoolProfile"]] = None, linux_profile: Optional["_models.ContainerServiceLinuxProfile"] = 
None, windows_profile: Optional["_models.ManagedClusterWindowsProfile"] = None, service_principal_profile: Optional["_models.ManagedClusterServicePrincipalProfile"] = None, - addon_profiles: Optional[Dict[str, "_models.ManagedClusterAddonProfile"]] = None, + addon_profiles: Optional[dict[str, "_models.ManagedClusterAddonProfile"]] = None, pod_identity_profile: Optional["_models.ManagedClusterPodIdentityProfile"] = None, oidc_issuer_profile: Optional["_models.ManagedClusterOIDCIssuerProfile"] = None, node_resource_group: Optional[str] = None, + node_resource_group_profile: Optional["_models.ManagedClusterNodeResourceGroupProfile"] = None, enable_rbac: Optional[bool] = None, support_plan: Optional[Union[str, "_models.KubernetesSupportPlan"]] = None, - enable_pod_security_policy: Optional[bool] = None, network_profile: Optional["_models.ContainerServiceNetworkProfile"] = None, aad_profile: Optional["_models.ManagedClusterAADProfile"] = None, auto_upgrade_profile: Optional["_models.ManagedClusterAutoUpgradeProfile"] = None, @@ -3053,8 +3578,8 @@ def __init__( # pylint: disable=too-many-locals auto_scaler_profile: Optional["_models.ManagedClusterPropertiesAutoScalerProfile"] = None, api_server_access_profile: Optional["_models.ManagedClusterAPIServerAccessProfile"] = None, disk_encryption_set_id: Optional[str] = None, - identity_profile: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None, - private_link_resources: Optional[List["_models.PrivateLinkResource"]] = None, + identity_profile: Optional[dict[str, "_models.UserAssignedIdentity"]] = None, + private_link_resources: Optional[list["_models.PrivateLinkResource"]] = None, disable_local_accounts: Optional[bool] = None, http_proxy_config: Optional["_models.ManagedClusterHTTPProxyConfig"] = None, security_profile: Optional["_models.ManagedClusterSecurityProfile"] = None, @@ -3065,6 +3590,10 @@ def __init__( # pylint: disable=too-many-locals azure_monitor_profile: 
Optional["_models.ManagedClusterAzureMonitorProfile"] = None, service_mesh_profile: Optional["_models.ServiceMeshProfile"] = None, metrics_profile: Optional["_models.ManagedClusterMetricsProfile"] = None, + node_provisioning_profile: Optional["_models.ManagedClusterNodeProvisioningProfile"] = None, + bootstrap_profile: Optional["_models.ManagedClusterBootstrapProfile"] = None, + ai_toolchain_operator_profile: Optional["_models.ManagedClusterAIToolchainOperatorProfile"] = None, + status: Optional["_models.ManagedClusterStatus"] = None, **kwargs: Any ) -> None: """ @@ -3073,138 +3602,148 @@ def __init__( # pylint: disable=too-many-locals :keyword location: The geo-location where the resource lives. Required. :paramtype location: str :keyword sku: The managed cluster SKU. - :paramtype sku: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKU + :paramtype sku: ~azure.mgmt.containerservice.models.ManagedClusterSKU :keyword extended_location: The extended location of the Virtual Machine. - :paramtype extended_location: ~azure.mgmt.containerservice.v2024_07_01.models.ExtendedLocation + :paramtype extended_location: ~azure.mgmt.containerservice.models.ExtendedLocation :keyword identity: The identity of the managed cluster, if configured. - :paramtype identity: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIdentity - :keyword kubernetes_version: Both patch version (e.g. 1.20.13) and - (e.g. 1.20) are supported. When is specified, the latest supported - GA patch version is chosen automatically. Updating the cluster with the same once - it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch - version is available. When you upgrade a supported AKS cluster, Kubernetes minor versions - cannot be skipped. All upgrades must be performed sequentially by major version number. For - example, upgrades between 1.14.x -> 1.15.x or 1.15.x -> 1.16.x are allowed, however 1.14.x -> - 1.16.x is not allowed. 
See `upgrading an AKS cluster - `_ for more details. + :paramtype identity: ~azure.mgmt.containerservice.models.ManagedClusterIdentity + :keyword kind: This is primarily used to expose different UI experiences in the portal for + different kinds. + :paramtype kind: str + :keyword kubernetes_version: The version of Kubernetes specified by the user. Both patch + version (e.g. 1.20.13) and (e.g. 1.20) are supported. When + is specified, the latest supported GA patch version is chosen automatically. + Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14) + will not trigger an upgrade, even if a newer patch version is available. When you upgrade a + supported AKS cluster, Kubernetes minor versions cannot be skipped. All upgrades must be + performed sequentially by major version number. For example, upgrades between 1.14.x -> 1.15.x + or 1.15.x -> 1.16.x are allowed, however 1.14.x -> 1.16.x is not allowed. See `upgrading an AKS + cluster `_ for more details. :paramtype kubernetes_version: str - :keyword dns_prefix: This cannot be updated once the Managed Cluster has been created. + :keyword dns_prefix: The DNS prefix of the Managed Cluster. This cannot be updated once the + Managed Cluster has been created. :paramtype dns_prefix: str - :keyword fqdn_subdomain: This cannot be updated once the Managed Cluster has been created. + :keyword fqdn_subdomain: The FQDN subdomain of the private cluster with custom private dns + zone. This cannot be updated once the Managed Cluster has been created. :paramtype fqdn_subdomain: str :keyword agent_pool_profiles: The agent pool properties. :paramtype agent_pool_profiles: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAgentPoolProfile] + list[~azure.mgmt.containerservice.models.ManagedClusterAgentPoolProfile] :keyword linux_profile: The profile for Linux VMs in the Managed Cluster. 
- :paramtype linux_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceLinuxProfile + :paramtype linux_profile: ~azure.mgmt.containerservice.models.ContainerServiceLinuxProfile :keyword windows_profile: The profile for Windows VMs in the Managed Cluster. - :paramtype windows_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWindowsProfile + :paramtype windows_profile: ~azure.mgmt.containerservice.models.ManagedClusterWindowsProfile :keyword service_principal_profile: Information about a service principal identity for the cluster to use for manipulating Azure APIs. :paramtype service_principal_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile + ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile :keyword addon_profiles: The profile of managed cluster add-on. :paramtype addon_profiles: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAddonProfile] - :keyword pod_identity_profile: See `use AAD pod identity - `_ for more details on AAD pod - identity integration. + ~azure.mgmt.containerservice.models.ManagedClusterAddonProfile] + :keyword pod_identity_profile: The pod identity profile of the Managed Cluster. See `use AAD + pod identity `_ for more + details on AAD pod identity integration. :paramtype pod_identity_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProfile + ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProfile :keyword oidc_issuer_profile: The OIDC issuer profile of the Managed Cluster. :paramtype oidc_issuer_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterOIDCIssuerProfile + ~azure.mgmt.containerservice.models.ManagedClusterOIDCIssuerProfile :keyword node_resource_group: The name of the resource group containing agent pool nodes. 
:paramtype node_resource_group: str + :keyword node_resource_group_profile: Profile of the node resource group configuration. + :paramtype node_resource_group_profile: + ~azure.mgmt.containerservice.models.ManagedClusterNodeResourceGroupProfile :keyword enable_rbac: Whether to enable Kubernetes Role-Based Access Control. :paramtype enable_rbac: bool :keyword support_plan: The support plan for the Managed Cluster. If unspecified, the default is 'KubernetesOfficial'. Known values are: "KubernetesOfficial" and "AKSLongTermSupport". - :paramtype support_plan: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesSupportPlan - :keyword enable_pod_security_policy: (DEPRECATED) Whether to enable Kubernetes pod security - policy (preview). PodSecurityPolicy was deprecated in Kubernetes v1.21, and removed from - Kubernetes in v1.25. Learn more at https://aka.ms/k8s/psp and https://aka.ms/aks/psp. - :paramtype enable_pod_security_policy: bool + :paramtype support_plan: str or ~azure.mgmt.containerservice.models.KubernetesSupportPlan :keyword network_profile: The network configuration profile. - :paramtype network_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ContainerServiceNetworkProfile + :paramtype network_profile: ~azure.mgmt.containerservice.models.ContainerServiceNetworkProfile :keyword aad_profile: The Azure Active Directory configuration. - :paramtype aad_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile + :paramtype aad_profile: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile :keyword auto_upgrade_profile: The auto upgrade configuration. :paramtype auto_upgrade_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAutoUpgradeProfile + ~azure.mgmt.containerservice.models.ManagedClusterAutoUpgradeProfile :keyword upgrade_settings: Settings for upgrading a cluster. 
- :paramtype upgrade_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.ClusterUpgradeSettings + :paramtype upgrade_settings: ~azure.mgmt.containerservice.models.ClusterUpgradeSettings :keyword auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled. :paramtype auto_scaler_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPropertiesAutoScalerProfile + ~azure.mgmt.containerservice.models.ManagedClusterPropertiesAutoScalerProfile :keyword api_server_access_profile: The access profile for managed cluster API server. :paramtype api_server_access_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAPIServerAccessProfile - :keyword disk_encryption_set_id: This is of the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{encryptionSetName}'. # pylint: disable=line-too-long + ~azure.mgmt.containerservice.models.ManagedClusterAPIServerAccessProfile + :keyword disk_encryption_set_id: The Resource ID of the disk encryption set to use for enabling + encryption at rest. This is of the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{encryptionSetName}'. :paramtype disk_encryption_set_id: str - :keyword identity_profile: Identities associated with the cluster. + :keyword identity_profile: The user identity associated with the managed cluster. This identity + will be used by the kubelet. Only one user assigned identity is allowed. The only accepted key + is "kubeletidentity", with value of "resourceId": + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}". 
:paramtype identity_profile: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity] + ~azure.mgmt.containerservice.models.UserAssignedIdentity] :keyword private_link_resources: Private link resources associated with the cluster. :paramtype private_link_resources: - list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource] - :keyword disable_local_accounts: If set to true, getting static credentials will be disabled - for this cluster. This must only be used on Managed Clusters that are AAD enabled. For more - details see `disable local accounts + list[~azure.mgmt.containerservice.models.PrivateLinkResource] + :keyword disable_local_accounts: If local accounts should be disabled on the Managed Cluster. + If set to true, getting static credentials will be disabled for this cluster. This must only be + used on Managed Clusters that are AAD enabled. For more details see `disable local accounts `_. :paramtype disable_local_accounts: bool :keyword http_proxy_config: Configurations for provisioning the cluster with HTTP proxy servers. - :paramtype http_proxy_config: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterHTTPProxyConfig + :paramtype http_proxy_config: ~azure.mgmt.containerservice.models.ManagedClusterHTTPProxyConfig :keyword security_profile: Security profile for the managed cluster. - :paramtype security_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfile + :paramtype security_profile: ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfile :keyword storage_profile: Storage profile for the managed cluster. - :paramtype storage_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfile + :paramtype storage_profile: ~azure.mgmt.containerservice.models.ManagedClusterStorageProfile :keyword ingress_profile: Ingress profile for the managed cluster. 
- :paramtype ingress_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIngressProfile - :keyword public_network_access: Allow or deny public network access for AKS. Known values are: - "Enabled" and "Disabled". + :paramtype ingress_profile: ~azure.mgmt.containerservice.models.ManagedClusterIngressProfile + :keyword public_network_access: PublicNetworkAccess of the managedCluster. Allow or deny public + network access for AKS. Known values are: "Enabled" and "Disabled". :paramtype public_network_access: str or - ~azure.mgmt.containerservice.v2024_07_01.models.PublicNetworkAccess + ~azure.mgmt.containerservice.models.PublicNetworkAccess :keyword workload_auto_scaler_profile: Workload Auto-scaler profile for the managed cluster. :paramtype workload_auto_scaler_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfile + ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfile :keyword azure_monitor_profile: Azure Monitor addon profiles for monitoring the managed cluster. :paramtype azure_monitor_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfile + ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfile :keyword service_mesh_profile: Service mesh profile for a managed cluster. - :paramtype service_mesh_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ServiceMeshProfile + :paramtype service_mesh_profile: ~azure.mgmt.containerservice.models.ServiceMeshProfile :keyword metrics_profile: Optional cluster metrics configuration. - :paramtype metrics_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterMetricsProfile + :paramtype metrics_profile: ~azure.mgmt.containerservice.models.ManagedClusterMetricsProfile + :keyword node_provisioning_profile: Node provisioning settings that apply to the whole cluster. 
+ :paramtype node_provisioning_profile: + ~azure.mgmt.containerservice.models.ManagedClusterNodeProvisioningProfile + :keyword bootstrap_profile: Profile of the cluster bootstrap configuration. + :paramtype bootstrap_profile: + ~azure.mgmt.containerservice.models.ManagedClusterBootstrapProfile + :keyword ai_toolchain_operator_profile: AI toolchain operator settings that apply to the whole + cluster. + :paramtype ai_toolchain_operator_profile: + ~azure.mgmt.containerservice.models.ManagedClusterAIToolchainOperatorProfile + :keyword status: Contains read-only information about the Managed Cluster. + :paramtype status: ~azure.mgmt.containerservice.models.ManagedClusterStatus """ super().__init__(tags=tags, location=location, **kwargs) + self.e_tag: Optional[str] = None self.sku = sku self.extended_location = extended_location self.identity = identity - self.provisioning_state = None - self.power_state = None - self.max_agent_pools = None + self.kind = kind + self.provisioning_state: Optional[str] = None + self.power_state: Optional["_models.PowerState"] = None + self.max_agent_pools: Optional[int] = None self.kubernetes_version = kubernetes_version - self.current_kubernetes_version = None + self.current_kubernetes_version: Optional[str] = None self.dns_prefix = dns_prefix self.fqdn_subdomain = fqdn_subdomain - self.fqdn = None - self.private_fqdn = None - self.azure_portal_fqdn = None + self.fqdn: Optional[str] = None + self.private_fqdn: Optional[str] = None + self.azure_portal_fqdn: Optional[str] = None self.agent_pool_profiles = agent_pool_profiles self.linux_profile = linux_profile self.windows_profile = windows_profile @@ -3213,9 +3752,9 @@ def __init__( # pylint: disable=too-many-locals self.pod_identity_profile = pod_identity_profile self.oidc_issuer_profile = oidc_issuer_profile self.node_resource_group = node_resource_group + self.node_resource_group_profile = node_resource_group_profile self.enable_rbac = enable_rbac self.support_plan = support_plan - 
self.enable_pod_security_policy = enable_pod_security_policy self.network_profile = network_profile self.aad_profile = aad_profile self.auto_upgrade_profile = auto_upgrade_profile @@ -3234,12 +3773,17 @@ def __init__( # pylint: disable=too-many-locals self.workload_auto_scaler_profile = workload_auto_scaler_profile self.azure_monitor_profile = azure_monitor_profile self.service_mesh_profile = service_mesh_profile - self.resource_uid = None + self.resource_uid: Optional[str] = None self.metrics_profile = metrics_profile + self.node_provisioning_profile = node_provisioning_profile + self.bootstrap_profile = bootstrap_profile + self.ai_toolchain_operator_profile = ai_toolchain_operator_profile + self.status = status class ManagedClusterAADProfile(_serialization.Model): - """For more details see `managed AAD on AKS `_. + """AADProfile specifies attributes for Azure Active Directory integration. For more details see + `managed AAD on AKS `_. :ivar managed: Whether to enable managed AAD. :vartype managed: bool @@ -3277,7 +3821,7 @@ def __init__( *, managed: Optional[bool] = None, enable_azure_rbac: Optional[bool] = None, - admin_group_object_i_ds: Optional[List[str]] = None, + admin_group_object_i_ds: Optional[list[str]] = None, client_app_id: Optional[str] = None, server_app_id: Optional[str] = None, server_app_secret: Optional[str] = None, @@ -3323,7 +3867,7 @@ class ManagedClusterAccessProfile(TrackedResource): All required parameters must be populated in order to send to server. :ivar id: Fully qualified resource ID for the resource. E.g. - "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". :vartype id: str :ivar name: The name of the resource. 
:vartype name: str @@ -3332,7 +3876,7 @@ class ManagedClusterAccessProfile(TrackedResource): :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData :ivar tags: Resource tags. :vartype tags: dict[str, str] :ivar location: The geo-location where the resource lives. Required. @@ -3363,7 +3907,7 @@ def __init__( self, *, location: str, - tags: Optional[Dict[str, str]] = None, + tags: Optional[dict[str, str]] = None, kube_config: Optional[bytes] = None, **kwargs: Any ) -> None: @@ -3391,8 +3935,7 @@ class ManagedClusterAddonProfile(_serialization.Model): :ivar config: Key-value pairs for configuring an add-on. :vartype config: dict[str, str] :ivar identity: Information of user assigned identity used by this add-on. - :vartype identity: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAddonProfileIdentity + :vartype identity: ~azure.mgmt.containerservice.models.ManagedClusterAddonProfileIdentity """ _validation = { @@ -3406,7 +3949,7 @@ class ManagedClusterAddonProfile(_serialization.Model): "identity": {"key": "identity", "type": "ManagedClusterAddonProfileIdentity"}, } - def __init__(self, *, enabled: bool, config: Optional[Dict[str, str]] = None, **kwargs: Any) -> None: + def __init__(self, *, enabled: bool, config: Optional[dict[str, str]] = None, **kwargs: Any) -> None: """ :keyword enabled: Whether the add-on is enabled or not. Required. 
:paramtype enabled: bool @@ -3416,7 +3959,7 @@ def __init__(self, *, enabled: bool, config: Optional[Dict[str, str]] = None, ** super().__init__(**kwargs) self.enabled = enabled self.config = config - self.identity = None + self.identity: Optional["_models.ManagedClusterAddonProfileIdentity"] = None class UserAssignedIdentity(_serialization.Model): @@ -3470,121 +4013,141 @@ class ManagedClusterAddonProfileIdentity(UserAssignedIdentity): """ -class ManagedClusterAgentPoolProfileProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes +class ManagedClusterAgentPoolProfileProperties(_serialization.Model): """Properties for the container service agent pool profile. Variables are only populated by the server, and will be ignored when sending a request. + :ivar e_tag: Unique read-only string used to implement optimistic concurrency. The eTag value + will change when the resource is updated. Specify an if-match or if-none-match header with the + eTag value for a subsequent request to enable optimistic concurrency per the normal eTag + convention. + :vartype e_tag: str :ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1. :vartype count: int - :ivar vm_size: VM size availability varies by region. If a node contains insufficient compute - resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted - VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :ivar vm_size: The size of the agent pool VMs. VM size availability varies by region. If a node + contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. + For more details on restricted VM sizes, see: + https://docs.microsoft.com/azure/aks/quotas-skus-regions. 
:vartype vm_size: str :ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. :vartype os_disk_size_gb: int - :ivar os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk - larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed - after creation. For more information see `Ephemeral OS - `_. Known values are: - "Managed" and "Ephemeral". - :vartype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :ivar os_disk_type: The OS disk type to be used for machines in the agent pool. The default is + 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. + Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see + `Ephemeral OS `_. + Known values are: "Managed" and "Ephemeral". + :vartype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType :ivar kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". - :vartype kubelet_disk_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :vartype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType :ivar workload_runtime: Determines the type of workload a node can run. Known values are: - "OCIContainer" and "WasmWasi". - :vartype workload_runtime: str or - ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime - :ivar vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and used. - If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just - nodes. 
This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + "OCIContainer", "WasmWasi", and "KataVmIsolation". + :vartype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime + :ivar message_of_the_day: Message of the day for Linux nodes, base64-encoded. A base64-encoded + string which will be written to /etc/motd after decoding. This allows customization of the + message of the day for Linux nodes. It must not be specified for Windows nodes. It must be a + static string (i.e., will be printed raw and not be executed as a script). + :vartype message_of_the_day: str + :ivar vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will join + on startup. If this is not specified, a VNET and subnet will be generated and used. If no + podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :vartype vnet_subnet_id: str - :ivar pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see - vnetSubnetID for more details). This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :ivar pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted, pod + IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of + the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. 
:vartype pod_subnet_id: str + :ivar pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the + agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values + are: "DynamicIndividual" and "StaticBlock". + :vartype pod_ip_allocation_mode: str or ~azure.mgmt.containerservice.models.PodIPAllocationMode :ivar max_pods: The maximum number of pods that can run on a node. :vartype max_pods: int :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= - 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", - "Windows2019", and "Windows2022". - :vartype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3", + "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404". + :vartype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU :ivar max_count: The maximum number of nodes for auto-scaling. :vartype max_count: int :ivar min_count: The minimum number of nodes for auto-scaling. :vartype min_count: int :ivar enable_auto_scaling: Whether to enable auto-scaler. :vartype enable_auto_scaling: bool - :ivar scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, it - defaults to Delete. Known values are: "Delete" and "Deallocate". - :vartype scale_down_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode - :ivar type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets" and - "AvailabilitySet". 
- :vartype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType - :ivar mode: A cluster must have at least one 'System' Agent Pool at all times. For additional - information on agent pool restrictions and best practices, see: - https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". - :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode - :ivar orchestrator_version: Both patch version (e.g. 1.20.13) and - (e.g. 1.20) are supported. When is specified, the latest supported - GA patch version is chosen automatically. Updating the cluster with the same once - it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch - version is available. As a best practice, you should upgrade all node pools in an AKS cluster - to the same Kubernetes version. The node pool version must have the same major version as the - control plane. The node pool minor version must be within two minor versions of the control - plane version. The node pool version cannot be greater than the control plane version. For more - information see `upgrading a node pool + :ivar scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also + effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values + are: "Delete" and "Deallocate". + :vartype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode + :ivar type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets", + "AvailabilitySet", and "VirtualMachines". + :vartype type: str or ~azure.mgmt.containerservice.models.AgentPoolType + :ivar mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool at + all times. For additional information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and + "Gateway". 
+ :vartype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode + :ivar orchestrator_version: The version of Kubernetes specified by the user. Both patch version + (e.g. 1.20.13) and (e.g. 1.20) are supported. When + is specified, the latest supported GA patch version is chosen automatically. + Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14) + will not trigger an upgrade, even if a newer patch version is available. As a best practice, + you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node + pool version must have the same major version as the control plane. The node pool minor version + must be within two minor versions of the control plane version. The node pool version cannot be + greater than the control plane version. For more information see `upgrading a node pool `_. :vartype orchestrator_version: str - :ivar current_orchestrator_version: If orchestratorVersion is a fully specified version - , this field will be exactly equal to it. If orchestratorVersion is - , this field will contain the full version being used. + :ivar current_orchestrator_version: The version of Kubernetes the Agent Pool is running. If + orchestratorVersion is a fully specified version , this field will be + exactly equal to it. If orchestratorVersion is , this field will contain the full + version being used. :vartype current_orchestrator_version: str :ivar node_image_version: The version of node image. :vartype node_image_version: str :ivar upgrade_settings: Settings for upgrading the agentpool. - :vartype upgrade_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings + :vartype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings :ivar provisioning_state: The current deployment or provisioning state. :vartype provisioning_state: str - :ivar power_state: When an Agent Pool is first created it is initially Running. 
The Agent Pool - can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and - does not accrue billing charges. An Agent Pool can only be stopped if it is Running and - provisioning state is Succeeded. - :vartype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :ivar power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first + created it is initially Running. The Agent Pool can be stopped by setting this field to + Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An + Agent Pool can only be stopped if it is Running and provisioning state is Succeeded. + :vartype power_state: ~azure.mgmt.containerservice.models.PowerState :ivar availability_zones: The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'. :vartype availability_zones: list[str] - :ivar enable_node_public_ip: Some scenarios may require nodes in a node pool to receive their - own dedicated public IP addresses. A common scenario is for gaming workloads, where a console - needs to make a direct connection to a cloud virtual machine to minimize hops. For more - information see `assigning a public IP per node + :ivar enable_node_public_ip: Whether each node is allocated its own public IP. Some scenarios + may require nodes in a node pool to receive their own dedicated public IP addresses. A common + scenario is for gaming workloads, where a console needs to make a direct connection to a cloud + virtual machine to minimize hops. For more information see `assigning a public IP per node `_. The default is false. :vartype enable_node_public_ip: bool - :ivar node_public_ip_prefix_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. 
# pylint: disable=line-too-long + :ivar node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. :vartype node_public_ip_prefix_id: str :ivar scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'. Known values are: "Spot" and "Regular". - :vartype scale_set_priority: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority - :ivar scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is - 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". + :vartype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority + :ivar scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This + cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is + 'Delete'. Known values are: "Delete" and "Deallocate". :vartype scale_set_eviction_policy: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy - :ivar spot_max_price: Possible values are any decimal value greater than zero or -1 which + ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy + :ivar spot_max_price: The max price (in US Dollars) you are willing to pay for spot instances. + Possible values are any decimal value greater than zero or -1 which indicates default price to + be up-to on-demand. Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see `spot VMs pricing `_. :vartype spot_max_price: float @@ -3598,46 +4161,61 @@ class ManagedClusterAgentPoolProfileProperties(_serialization.Model): # pylint: :ivar proximity_placement_group_id: The ID for Proximity Placement Group. 
:vartype proximity_placement_group_id: str :ivar kubelet_config: The Kubelet configuration on the agent pool nodes. - :vartype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :vartype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig :ivar linux_os_config: The OS configuration of Linux agent nodes. - :vartype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig - :ivar enable_encryption_at_host: This is only supported on certain VM sizes and in certain - Azure regions. For more information, see: - https://docs.microsoft.com/azure/aks/enable-host-encryption. + :vartype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig + :ivar enable_encryption_at_host: Whether to enable host based OS and data drive encryption. + This is only supported on certain VM sizes and in certain Azure regions. For more information, + see: https://docs.microsoft.com/azure/aks/enable-host-encryption. :vartype enable_encryption_at_host: bool :ivar enable_ultra_ssd: Whether to enable UltraSSD. :vartype enable_ultra_ssd: bool - :ivar enable_fips: See `Add a FIPS-enabled node pool + :ivar enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool `_ for more details. :vartype enable_fips: bool :ivar gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g". - :vartype gpu_instance_profile: str or - ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :vartype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile :ivar creation_data: CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot. 
- :vartype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :vartype creation_data: ~azure.mgmt.containerservice.models.CreationData :ivar capacity_reservation_group_id: AKS will associate the specified agent pool with the Capacity Reservation Group. :vartype capacity_reservation_group_id: str - :ivar host_group_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + :ivar host_group_id: The fully qualified resource ID of the Dedicated Host Group to provision + virtual machines from, used only in creation scenario and not allowed to changed once set. This + is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see `Azure dedicated hosts `_. :vartype host_group_id: str :ivar network_profile: Network-related settings of an agent pool. - :vartype network_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :vartype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile :ivar windows_profile: The Windows agent pool's specific profile. - :vartype windows_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :vartype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile :ivar security_profile: The security settings of an agent pool. - :vartype security_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile + :vartype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile + :ivar gpu_profile: GPU settings for the Agent Pool. + :vartype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile + :ivar gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field + cannot be set if agent pool mode is not Gateway. 
+ :vartype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile + :ivar virtual_machines_profile: Specifications on VirtualMachines agent pool. + :vartype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile + :ivar virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool. + :vartype virtual_machine_nodes_status: + list[~azure.mgmt.containerservice.models.VirtualMachineNodes] + :ivar status: Contains read-only information about the Agent Pool. + :vartype status: ~azure.mgmt.containerservice.models.AgentPoolStatus + :ivar local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS overrides. + LocalDNS helps improve performance and reliability of DNS resolution in an AKS cluster. For + more details see aka.ms/aks/localdns. + :vartype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile """ _validation = { + "e_tag": {"readonly": True}, "os_disk_size_gb": {"maximum": 2048, "minimum": 0}, "current_orchestrator_version": {"readonly": True}, "node_image_version": {"readonly": True}, @@ -3645,14 +4223,17 @@ class ManagedClusterAgentPoolProfileProperties(_serialization.Model): # pylint: } _attribute_map = { + "e_tag": {"key": "eTag", "type": "str"}, "count": {"key": "count", "type": "int"}, "vm_size": {"key": "vmSize", "type": "str"}, "os_disk_size_gb": {"key": "osDiskSizeGB", "type": "int"}, "os_disk_type": {"key": "osDiskType", "type": "str"}, "kubelet_disk_type": {"key": "kubeletDiskType", "type": "str"}, "workload_runtime": {"key": "workloadRuntime", "type": "str"}, + "message_of_the_day": {"key": "messageOfTheDay", "type": "str"}, "vnet_subnet_id": {"key": "vnetSubnetID", "type": "str"}, "pod_subnet_id": {"key": "podSubnetID", "type": "str"}, + "pod_ip_allocation_mode": {"key": "podIPAllocationMode", "type": "str"}, "max_pods": {"key": "maxPods", "type": "int"}, "os_type": {"key": "osType", "type": "str"}, "os_sku": {"key": "osSKU", "type": "str"}, @@ 
-3690,6 +4271,12 @@ class ManagedClusterAgentPoolProfileProperties(_serialization.Model): # pylint: "network_profile": {"key": "networkProfile", "type": "AgentPoolNetworkProfile"}, "windows_profile": {"key": "windowsProfile", "type": "AgentPoolWindowsProfile"}, "security_profile": {"key": "securityProfile", "type": "AgentPoolSecurityProfile"}, + "gpu_profile": {"key": "gpuProfile", "type": "GPUProfile"}, + "gateway_profile": {"key": "gatewayProfile", "type": "AgentPoolGatewayProfile"}, + "virtual_machines_profile": {"key": "virtualMachinesProfile", "type": "VirtualMachinesProfile"}, + "virtual_machine_nodes_status": {"key": "virtualMachineNodesStatus", "type": "[VirtualMachineNodes]"}, + "status": {"key": "status", "type": "AgentPoolStatus"}, + "local_dns_profile": {"key": "localDNSProfile", "type": "LocalDNSProfile"}, } def __init__( # pylint: disable=too-many-locals @@ -3701,8 +4288,10 @@ def __init__( # pylint: disable=too-many-locals os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None, kubelet_disk_type: Optional[Union[str, "_models.KubeletDiskType"]] = None, workload_runtime: Optional[Union[str, "_models.WorkloadRuntime"]] = None, + message_of_the_day: Optional[str] = None, vnet_subnet_id: Optional[str] = None, pod_subnet_id: Optional[str] = None, + pod_ip_allocation_mode: Optional[Union[str, "_models.PodIPAllocationMode"]] = None, max_pods: Optional[int] = None, os_type: Union[str, "_models.OSType"] = "Linux", os_sku: Optional[Union[str, "_models.OSSKU"]] = None, @@ -3715,15 +4304,15 @@ def __init__( # pylint: disable=too-many-locals orchestrator_version: Optional[str] = None, upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None, power_state: Optional["_models.PowerState"] = None, - availability_zones: Optional[List[str]] = None, + availability_zones: Optional[list[str]] = None, enable_node_public_ip: Optional[bool] = None, node_public_ip_prefix_id: Optional[str] = None, scale_set_priority: Union[str, "_models.ScaleSetPriority"] = 
"Regular", scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete", spot_max_price: float = -1, - tags: Optional[Dict[str, str]] = None, - node_labels: Optional[Dict[str, str]] = None, - node_taints: Optional[List[str]] = None, + tags: Optional[dict[str, str]] = None, + node_labels: Optional[dict[str, str]] = None, + node_taints: Optional[list[str]] = None, proximity_placement_group_id: Optional[str] = None, kubelet_config: Optional["_models.KubeletConfig"] = None, linux_os_config: Optional["_models.LinuxOSConfig"] = None, @@ -3737,6 +4326,12 @@ def __init__( # pylint: disable=too-many-locals network_profile: Optional["_models.AgentPoolNetworkProfile"] = None, windows_profile: Optional["_models.AgentPoolWindowsProfile"] = None, security_profile: Optional["_models.AgentPoolSecurityProfile"] = None, + gpu_profile: Optional["_models.GPUProfile"] = None, + gateway_profile: Optional["_models.AgentPoolGatewayProfile"] = None, + virtual_machines_profile: Optional["_models.VirtualMachinesProfile"] = None, + virtual_machine_nodes_status: Optional[list["_models.VirtualMachineNodes"]] = None, + status: Optional["_models.AgentPoolStatus"] = None, + local_dns_profile: Optional["_models.LocalDNSProfile"] = None, **kwargs: Any ) -> None: """ @@ -3744,107 +4339,124 @@ def __init__( # pylint: disable=too-many-locals range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1. :paramtype count: int - :keyword vm_size: VM size availability varies by region. If a node contains insufficient - compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on - restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :keyword vm_size: The size of the agent pool VMs. VM size availability varies by region. If a + node contains insufficient compute resources (memory, cpu, etc) pods might fail to run + correctly. 
For more details on restricted VM sizes, see: + https://docs.microsoft.com/azure/aks/quotas-skus-regions. :paramtype vm_size: str :keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. :paramtype os_disk_size_gb: int - :keyword os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk - larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed - after creation. For more information see `Ephemeral OS + :keyword os_disk_type: The OS disk type to be used for machines in the agent pool. The default + is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested + OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more + information see `Ephemeral OS `_. Known values are: "Managed" and "Ephemeral". - :paramtype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :paramtype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType :keyword kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". - :paramtype kubelet_disk_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :paramtype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType :keyword workload_runtime: Determines the type of workload a node can run. Known values are: - "OCIContainer" and "WasmWasi". - :paramtype workload_runtime: str or - ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime - :keyword vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and - used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to - just nodes. 
This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + "OCIContainer", "WasmWasi", and "KataVmIsolation". + :paramtype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime + :keyword message_of_the_day: Message of the day for Linux nodes, base64-encoded. A + base64-encoded string which will be written to /etc/motd after decoding. This allows + customization of the message of the day for Linux nodes. It must not be specified for Windows + nodes. It must be a static string (i.e., will be printed raw and not be executed as a script). + :paramtype message_of_the_day: str + :keyword vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will + join on startup. If this is not specified, a VNET and subnet will be generated and used. If no + podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :paramtype vnet_subnet_id: str - :keyword pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see - vnetSubnetID for more details). This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :keyword pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted, + pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is + of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. 
:paramtype pod_subnet_id: str + :keyword pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the + agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values + are: "DynamicIndividual" and "StaticBlock". + :paramtype pod_ip_allocation_mode: str or + ~azure.mgmt.containerservice.models.PodIPAllocationMode :keyword max_pods: The maximum number of pods that can run on a node. :paramtype max_pods: int :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType :keyword os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= - 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", - "Windows2019", and "Windows2022". - :paramtype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3", + "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404". + :paramtype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU :keyword max_count: The maximum number of nodes for auto-scaling. :paramtype max_count: int :keyword min_count: The minimum number of nodes for auto-scaling. :paramtype min_count: int :keyword enable_auto_scaling: Whether to enable auto-scaler. :paramtype enable_auto_scaling: bool - :keyword scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, - it defaults to Delete. Known values are: "Delete" and "Deallocate". - :paramtype scale_down_mode: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode - :keyword type: The type of Agent Pool. 
Known values are: "VirtualMachineScaleSets" and - "AvailabilitySet". - :paramtype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType - :keyword mode: A cluster must have at least one 'System' Agent Pool at all times. For - additional information on agent pool restrictions and best practices, see: - https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". - :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode - :keyword orchestrator_version: Both patch version (e.g. 1.20.13) and - (e.g. 1.20) are supported. When is specified, the latest supported - GA patch version is chosen automatically. Updating the cluster with the same once - it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch - version is available. As a best practice, you should upgrade all node pools in an AKS cluster - to the same Kubernetes version. The node pool version must have the same major version as the - control plane. The node pool minor version must be within two minor versions of the control - plane version. The node pool version cannot be greater than the control plane version. For more - information see `upgrading a node pool + :keyword scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also + effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values + are: "Delete" and "Deallocate". + :paramtype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode + :keyword type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets", + "AvailabilitySet", and "VirtualMachines". + :paramtype type: str or ~azure.mgmt.containerservice.models.AgentPoolType + :keyword mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool + at all times. 
For additional information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and + "Gateway". + :paramtype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode + :keyword orchestrator_version: The version of Kubernetes specified by the user. Both patch + version (e.g. 1.20.13) and (e.g. 1.20) are supported. When + is specified, the latest supported GA patch version is chosen automatically. + Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14) + will not trigger an upgrade, even if a newer patch version is available. As a best practice, + you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node + pool version must have the same major version as the control plane. The node pool minor version + must be within two minor versions of the control plane version. The node pool version cannot be + greater than the control plane version. For more information see `upgrading a node pool `_. :paramtype orchestrator_version: str :keyword upgrade_settings: Settings for upgrading the agentpool. - :paramtype upgrade_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings - :keyword power_state: When an Agent Pool is first created it is initially Running. The Agent - Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs - and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and - provisioning state is Succeeded. - :paramtype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :paramtype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings + :keyword power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first + created it is initially Running. The Agent Pool can be stopped by setting this field to + Stopped. 
A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An + Agent Pool can only be stopped if it is Running and provisioning state is Succeeded. + :paramtype power_state: ~azure.mgmt.containerservice.models.PowerState :keyword availability_zones: The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'. :paramtype availability_zones: list[str] - :keyword enable_node_public_ip: Some scenarios may require nodes in a node pool to receive - their own dedicated public IP addresses. A common scenario is for gaming workloads, where a - console needs to make a direct connection to a cloud virtual machine to minimize hops. For more - information see `assigning a public IP per node - `_. # pylint: disable=line-too-long + :keyword enable_node_public_ip: Whether each node is allocated its own public IP. Some + scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. + A common scenario is for gaming workloads, where a console needs to make a direct connection to + a cloud virtual machine to minimize hops. For more information see `assigning a public IP per + node + `_. The default is false. :paramtype enable_node_public_ip: bool - :keyword node_public_ip_prefix_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. # pylint: disable=line-too-long + :keyword node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. :paramtype node_public_ip_prefix_id: str :keyword scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'. Known values are: "Spot" and "Regular". 
- :paramtype scale_set_priority: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority - :keyword scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is - 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". + :paramtype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority + :keyword scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This + cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is + 'Delete'. Known values are: "Delete" and "Deallocate". :paramtype scale_set_eviction_policy: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy - :keyword spot_max_price: Possible values are any decimal value greater than zero or -1 which - indicates the willingness to pay any on-demand price. For more details on spot pricing, see - `spot VMs pricing `_. + ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy + :keyword spot_max_price: The max price (in US Dollars) you are willing to pay for spot + instances. Possible values are any decimal value greater than zero or -1 which indicates + default price to be up-to on-demand. Possible values are any decimal value greater than zero or + -1 which indicates the willingness to pay any on-demand price. For more details on spot + pricing, see `spot VMs pricing + `_. :paramtype spot_max_price: float :keyword tags: The tags to be persisted on the agent pool virtual machine scale set. :paramtype tags: dict[str, str] @@ -3856,54 +4468,71 @@ def __init__( # pylint: disable=too-many-locals :keyword proximity_placement_group_id: The ID for Proximity Placement Group. :paramtype proximity_placement_group_id: str :keyword kubelet_config: The Kubelet configuration on the agent pool nodes. 
- :paramtype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :paramtype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig :keyword linux_os_config: The OS configuration of Linux agent nodes. - :paramtype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig - :keyword enable_encryption_at_host: This is only supported on certain VM sizes and in certain - Azure regions. For more information, see: - https://docs.microsoft.com/azure/aks/enable-host-encryption. + :paramtype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig + :keyword enable_encryption_at_host: Whether to enable host based OS and data drive encryption. + This is only supported on certain VM sizes and in certain Azure regions. For more information, + see: https://docs.microsoft.com/azure/aks/enable-host-encryption. :paramtype enable_encryption_at_host: bool :keyword enable_ultra_ssd: Whether to enable UltraSSD. :paramtype enable_ultra_ssd: bool - :keyword enable_fips: See `Add a FIPS-enabled node pool + :keyword enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool `_ for more details. :paramtype enable_fips: bool :keyword gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g". - :paramtype gpu_instance_profile: str or - ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :paramtype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile :keyword creation_data: CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot. 
- :paramtype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :paramtype creation_data: ~azure.mgmt.containerservice.models.CreationData :keyword capacity_reservation_group_id: AKS will associate the specified agent pool with the Capacity Reservation Group. :paramtype capacity_reservation_group_id: str - :keyword host_group_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + :keyword host_group_id: The fully qualified resource ID of the Dedicated Host Group to + provision virtual machines from, used only in creation scenario and not allowed to be changed once + set. This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see `Azure dedicated hosts `_. :paramtype host_group_id: str :keyword network_profile: Network-related settings of an agent pool. - :paramtype network_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :paramtype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile :keyword windows_profile: The Windows agent pool's specific profile. - :paramtype windows_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :paramtype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile :keyword security_profile: The security settings of an agent pool. - :paramtype security_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile - """ - super().__init__(**kwargs) + :paramtype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile + :keyword gpu_profile: GPU settings for the Agent Pool. 
+ :paramtype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile + :keyword gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field + cannot be set if agent pool mode is not Gateway. + :paramtype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile + :keyword virtual_machines_profile: Specifications on VirtualMachines agent pool. + :paramtype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile + :keyword virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool. + :paramtype virtual_machine_nodes_status: + list[~azure.mgmt.containerservice.models.VirtualMachineNodes] + :keyword status: Contains read-only information about the Agent Pool. + :paramtype status: ~azure.mgmt.containerservice.models.AgentPoolStatus + :keyword local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS + overrides. LocalDNS helps improve performance and reliability of DNS resolution in an AKS + cluster. For more details see aka.ms/aks/localdns. 
+ :paramtype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile + """ + super().__init__(**kwargs) + self.e_tag: Optional[str] = None self.count = count self.vm_size = vm_size self.os_disk_size_gb = os_disk_size_gb self.os_disk_type = os_disk_type self.kubelet_disk_type = kubelet_disk_type self.workload_runtime = workload_runtime + self.message_of_the_day = message_of_the_day self.vnet_subnet_id = vnet_subnet_id self.pod_subnet_id = pod_subnet_id + self.pod_ip_allocation_mode = pod_ip_allocation_mode self.max_pods = max_pods self.os_type = os_type self.os_sku = os_sku @@ -3914,10 +4543,10 @@ def __init__( # pylint: disable=too-many-locals self.type = type self.mode = mode self.orchestrator_version = orchestrator_version - self.current_orchestrator_version = None - self.node_image_version = None + self.current_orchestrator_version: Optional[str] = None + self.node_image_version: Optional[str] = None self.upgrade_settings = upgrade_settings - self.provisioning_state = None + self.provisioning_state: Optional[str] = None self.power_state = power_state self.availability_zones = availability_zones self.enable_node_public_ip = enable_node_public_ip @@ -3941,127 +4570,151 @@ def __init__( # pylint: disable=too-many-locals self.network_profile = network_profile self.windows_profile = windows_profile self.security_profile = security_profile + self.gpu_profile = gpu_profile + self.gateway_profile = gateway_profile + self.virtual_machines_profile = virtual_machines_profile + self.virtual_machine_nodes_status = virtual_machine_nodes_status + self.status = status + self.local_dns_profile = local_dns_profile -class ManagedClusterAgentPoolProfile( - ManagedClusterAgentPoolProfileProperties -): # pylint: disable=too-many-instance-attributes +class ManagedClusterAgentPoolProfile(ManagedClusterAgentPoolProfileProperties): """Profile for the container service agent pool. Variables are only populated by the server, and will be ignored when sending a request. 
All required parameters must be populated in order to send to server. + :ivar e_tag: Unique read-only string used to implement optimistic concurrency. The eTag value + will change when the resource is updated. Specify an if-match or if-none-match header with the + eTag value for a subsequent request to enable optimistic concurrency per the normal eTag + convention. + :vartype e_tag: str :ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1. :vartype count: int - :ivar vm_size: VM size availability varies by region. If a node contains insufficient compute - resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted - VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :ivar vm_size: The size of the agent pool VMs. VM size availability varies by region. If a node + contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. + For more details on restricted VM sizes, see: + https://docs.microsoft.com/azure/aks/quotas-skus-regions. :vartype vm_size: str :ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. :vartype os_disk_size_gb: int - :ivar os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk - larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed - after creation. For more information see `Ephemeral OS - `_. Known values are: - "Managed" and "Ephemeral". - :vartype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :ivar os_disk_type: The OS disk type to be used for machines in the agent pool. 
The default is + 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. + Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see + `Ephemeral OS `_. + Known values are: "Managed" and "Ephemeral". + :vartype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType :ivar kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". - :vartype kubelet_disk_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :vartype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType :ivar workload_runtime: Determines the type of workload a node can run. Known values are: - "OCIContainer" and "WasmWasi". - :vartype workload_runtime: str or - ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime - :ivar vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and used. - If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just - nodes. This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + "OCIContainer", "WasmWasi", and "KataVmIsolation". + :vartype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime + :ivar message_of_the_day: Message of the day for Linux nodes, base64-encoded. A base64-encoded + string which will be written to /etc/motd after decoding. This allows customization of the + message of the day for Linux nodes. It must not be specified for Windows nodes. It must be a + static string (i.e., will be printed raw and not be executed as a script). 
+ :vartype message_of_the_day: str + :ivar vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will join + on startup. If this is not specified, a VNET and subnet will be generated and used. If no + podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :vartype vnet_subnet_id: str - :ivar pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see - vnetSubnetID for more details). This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :ivar pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted, pod + IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of + the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :vartype pod_subnet_id: str + :ivar pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the + agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values + are: "DynamicIndividual" and "StaticBlock". + :vartype pod_ip_allocation_mode: str or ~azure.mgmt.containerservice.models.PodIPAllocationMode :ivar max_pods: The maximum number of pods that can run on a node. :vartype max_pods: int :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType :ivar os_sku: Specifies the OS SKU used by the agent pool. 
The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= - 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", - "Windows2019", and "Windows2022". - :vartype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3", + "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404". + :vartype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU :ivar max_count: The maximum number of nodes for auto-scaling. :vartype max_count: int :ivar min_count: The minimum number of nodes for auto-scaling. :vartype min_count: int :ivar enable_auto_scaling: Whether to enable auto-scaler. :vartype enable_auto_scaling: bool - :ivar scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, it - defaults to Delete. Known values are: "Delete" and "Deallocate". - :vartype scale_down_mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode - :ivar type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets" and - "AvailabilitySet". - :vartype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType - :ivar mode: A cluster must have at least one 'System' Agent Pool at all times. For additional - information on agent pool restrictions and best practices, see: - https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". - :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode - :ivar orchestrator_version: Both patch version (e.g. 1.20.13) and - (e.g. 1.20) are supported. When is specified, the latest supported - GA patch version is chosen automatically. Updating the cluster with the same once - it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch - version is available. 
As a best practice, you should upgrade all node pools in an AKS cluster - to the same Kubernetes version. The node pool version must have the same major version as the - control plane. The node pool minor version must be within two minor versions of the control - plane version. The node pool version cannot be greater than the control plane version. For more - information see `upgrading a node pool + :ivar scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also + effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values + are: "Delete" and "Deallocate". + :vartype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode + :ivar type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets", + "AvailabilitySet", and "VirtualMachines". + :vartype type: str or ~azure.mgmt.containerservice.models.AgentPoolType + :ivar mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool at + all times. For additional information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and + "Gateway". + :vartype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode + :ivar orchestrator_version: The version of Kubernetes specified by the user. Both patch version + (e.g. 1.20.13) and (e.g. 1.20) are supported. When + is specified, the latest supported GA patch version is chosen automatically. + Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14) + will not trigger an upgrade, even if a newer patch version is available. As a best practice, + you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node + pool version must have the same major version as the control plane. The node pool minor version + must be within two minor versions of the control plane version. 
The node pool version cannot be + greater than the control plane version. For more information see `upgrading a node pool `_. :vartype orchestrator_version: str - :ivar current_orchestrator_version: If orchestratorVersion is a fully specified version - , this field will be exactly equal to it. If orchestratorVersion is - , this field will contain the full version being used. + :ivar current_orchestrator_version: The version of Kubernetes the Agent Pool is running. If + orchestratorVersion is a fully specified version , this field will be + exactly equal to it. If orchestratorVersion is , this field will contain the full + version being used. :vartype current_orchestrator_version: str :ivar node_image_version: The version of node image. :vartype node_image_version: str :ivar upgrade_settings: Settings for upgrading the agentpool. - :vartype upgrade_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings + :vartype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings :ivar provisioning_state: The current deployment or provisioning state. :vartype provisioning_state: str - :ivar power_state: When an Agent Pool is first created it is initially Running. The Agent Pool - can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and - does not accrue billing charges. An Agent Pool can only be stopped if it is Running and - provisioning state is Succeeded. - :vartype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :ivar power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first + created it is initially Running. The Agent Pool can be stopped by setting this field to + Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An + Agent Pool can only be stopped if it is Running and provisioning state is Succeeded. 
+ :vartype power_state: ~azure.mgmt.containerservice.models.PowerState :ivar availability_zones: The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'. :vartype availability_zones: list[str] - :ivar enable_node_public_ip: Some scenarios may require nodes in a node pool to receive their - own dedicated public IP addresses. A common scenario is for gaming workloads, where a console - needs to make a direct connection to a cloud virtual machine to minimize hops. For more - information see `assigning a public IP per node + :ivar enable_node_public_ip: Whether each node is allocated its own public IP. Some scenarios + may require nodes in a node pool to receive their own dedicated public IP addresses. A common + scenario is for gaming workloads, where a console needs to make a direct connection to a cloud + virtual machine to minimize hops. For more information see `assigning a public IP per node `_. The default is false. :vartype enable_node_public_ip: bool - :ivar node_public_ip_prefix_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. # pylint: disable=line-too-long + :ivar node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. :vartype node_public_ip_prefix_id: str :ivar scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'. Known values are: "Spot" and "Regular". - :vartype scale_set_priority: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority - :ivar scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is - 'Spot'. If not specified, the default is 'Delete'. 
Known values are: "Delete" and "Deallocate". + :vartype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority + :ivar scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This + cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is + 'Delete'. Known values are: "Delete" and "Deallocate". :vartype scale_set_eviction_policy: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy - :ivar spot_max_price: Possible values are any decimal value greater than zero or -1 which + ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy + :ivar spot_max_price: The max price (in US Dollars) you are willing to pay for spot instances. + Possible values are any decimal value greater than zero or -1 which indicates default price to + be up-to on-demand. Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see `spot VMs pricing `_. :vartype spot_max_price: float @@ -4075,48 +4728,64 @@ class ManagedClusterAgentPoolProfile( :ivar proximity_placement_group_id: The ID for Proximity Placement Group. :vartype proximity_placement_group_id: str :ivar kubelet_config: The Kubelet configuration on the agent pool nodes. - :vartype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :vartype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig :ivar linux_os_config: The OS configuration of Linux agent nodes. - :vartype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig - :ivar enable_encryption_at_host: This is only supported on certain VM sizes and in certain - Azure regions. For more information, see: - https://docs.microsoft.com/azure/aks/enable-host-encryption. 
+ :vartype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig + :ivar enable_encryption_at_host: Whether to enable host based OS and data drive encryption. + This is only supported on certain VM sizes and in certain Azure regions. For more information, + see: https://docs.microsoft.com/azure/aks/enable-host-encryption. :vartype enable_encryption_at_host: bool :ivar enable_ultra_ssd: Whether to enable UltraSSD. :vartype enable_ultra_ssd: bool - :ivar enable_fips: See `Add a FIPS-enabled node pool + :ivar enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool `_ for more details. :vartype enable_fips: bool :ivar gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g". - :vartype gpu_instance_profile: str or - ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :vartype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile :ivar creation_data: CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot. - :vartype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :vartype creation_data: ~azure.mgmt.containerservice.models.CreationData :ivar capacity_reservation_group_id: AKS will associate the specified agent pool with the Capacity Reservation Group. :vartype capacity_reservation_group_id: str - :ivar host_group_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + :ivar host_group_id: The fully qualified resource ID of the Dedicated Host Group to provision + virtual machines from, used only in creation scenario and not allowed to be changed once set. 
This + is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see `Azure dedicated hosts `_. :vartype host_group_id: str :ivar network_profile: Network-related settings of an agent pool. - :vartype network_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :vartype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile :ivar windows_profile: The Windows agent pool's specific profile. - :vartype windows_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :vartype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile :ivar security_profile: The security settings of an agent pool. - :vartype security_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile - :ivar name: Windows agent pool names must be 6 characters or less. Required. + :vartype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile + :ivar gpu_profile: GPU settings for the Agent Pool. + :vartype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile + :ivar gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field + cannot be set if agent pool mode is not Gateway. + :vartype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile + :ivar virtual_machines_profile: Specifications on VirtualMachines agent pool. + :vartype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile + :ivar virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool. + :vartype virtual_machine_nodes_status: + list[~azure.mgmt.containerservice.models.VirtualMachineNodes] + :ivar status: Contains read-only information about the Agent Pool. 
+ :vartype status: ~azure.mgmt.containerservice.models.AgentPoolStatus + :ivar local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS overrides. + LocalDNS helps improve performance and reliability of DNS resolution in an AKS cluster. For + more details see aka.ms/aks/localdns. + :vartype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile + :ivar name: Unique name of the agent pool profile in the context of the subscription and + resource group. Windows agent pool names must be 6 characters or less. Required. :vartype name: str """ _validation = { + "e_tag": {"readonly": True}, "os_disk_size_gb": {"maximum": 2048, "minimum": 0}, "current_orchestrator_version": {"readonly": True}, "node_image_version": {"readonly": True}, @@ -4125,14 +4794,17 @@ class ManagedClusterAgentPoolProfile( } _attribute_map = { + "e_tag": {"key": "eTag", "type": "str"}, "count": {"key": "count", "type": "int"}, "vm_size": {"key": "vmSize", "type": "str"}, "os_disk_size_gb": {"key": "osDiskSizeGB", "type": "int"}, "os_disk_type": {"key": "osDiskType", "type": "str"}, "kubelet_disk_type": {"key": "kubeletDiskType", "type": "str"}, "workload_runtime": {"key": "workloadRuntime", "type": "str"}, + "message_of_the_day": {"key": "messageOfTheDay", "type": "str"}, "vnet_subnet_id": {"key": "vnetSubnetID", "type": "str"}, "pod_subnet_id": {"key": "podSubnetID", "type": "str"}, + "pod_ip_allocation_mode": {"key": "podIPAllocationMode", "type": "str"}, "max_pods": {"key": "maxPods", "type": "int"}, "os_type": {"key": "osType", "type": "str"}, "os_sku": {"key": "osSKU", "type": "str"}, @@ -4170,6 +4842,12 @@ class ManagedClusterAgentPoolProfile( "network_profile": {"key": "networkProfile", "type": "AgentPoolNetworkProfile"}, "windows_profile": {"key": "windowsProfile", "type": "AgentPoolWindowsProfile"}, "security_profile": {"key": "securityProfile", "type": "AgentPoolSecurityProfile"}, + "gpu_profile": {"key": "gpuProfile", "type": "GPUProfile"}, + 
"gateway_profile": {"key": "gatewayProfile", "type": "AgentPoolGatewayProfile"}, + "virtual_machines_profile": {"key": "virtualMachinesProfile", "type": "VirtualMachinesProfile"}, + "virtual_machine_nodes_status": {"key": "virtualMachineNodesStatus", "type": "[VirtualMachineNodes]"}, + "status": {"key": "status", "type": "AgentPoolStatus"}, + "local_dns_profile": {"key": "localDNSProfile", "type": "LocalDNSProfile"}, "name": {"key": "name", "type": "str"}, } @@ -4183,8 +4861,10 @@ def __init__( # pylint: disable=too-many-locals os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None, kubelet_disk_type: Optional[Union[str, "_models.KubeletDiskType"]] = None, workload_runtime: Optional[Union[str, "_models.WorkloadRuntime"]] = None, + message_of_the_day: Optional[str] = None, vnet_subnet_id: Optional[str] = None, pod_subnet_id: Optional[str] = None, + pod_ip_allocation_mode: Optional[Union[str, "_models.PodIPAllocationMode"]] = None, max_pods: Optional[int] = None, os_type: Union[str, "_models.OSType"] = "Linux", os_sku: Optional[Union[str, "_models.OSSKU"]] = None, @@ -4197,15 +4877,15 @@ def __init__( # pylint: disable=too-many-locals orchestrator_version: Optional[str] = None, upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None, power_state: Optional["_models.PowerState"] = None, - availability_zones: Optional[List[str]] = None, + availability_zones: Optional[list[str]] = None, enable_node_public_ip: Optional[bool] = None, node_public_ip_prefix_id: Optional[str] = None, scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular", scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete", spot_max_price: float = -1, - tags: Optional[Dict[str, str]] = None, - node_labels: Optional[Dict[str, str]] = None, - node_taints: Optional[List[str]] = None, + tags: Optional[dict[str, str]] = None, + node_labels: Optional[dict[str, str]] = None, + node_taints: Optional[list[str]] = None, proximity_placement_group_id: 
Optional[str] = None, kubelet_config: Optional["_models.KubeletConfig"] = None, linux_os_config: Optional["_models.LinuxOSConfig"] = None, @@ -4219,6 +4899,12 @@ def __init__( # pylint: disable=too-many-locals network_profile: Optional["_models.AgentPoolNetworkProfile"] = None, windows_profile: Optional["_models.AgentPoolWindowsProfile"] = None, security_profile: Optional["_models.AgentPoolSecurityProfile"] = None, + gpu_profile: Optional["_models.GPUProfile"] = None, + gateway_profile: Optional["_models.AgentPoolGatewayProfile"] = None, + virtual_machines_profile: Optional["_models.VirtualMachinesProfile"] = None, + virtual_machine_nodes_status: Optional[list["_models.VirtualMachineNodes"]] = None, + status: Optional["_models.AgentPoolStatus"] = None, + local_dns_profile: Optional["_models.LocalDNSProfile"] = None, **kwargs: Any ) -> None: """ @@ -4226,107 +4912,124 @@ def __init__( # pylint: disable=too-many-locals range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1. :paramtype count: int - :keyword vm_size: VM size availability varies by region. If a node contains insufficient - compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on - restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions. + :keyword vm_size: The size of the agent pool VMs. VM size availability varies by region. If a + node contains insufficient compute resources (memory, cpu, etc) pods might fail to run + correctly. For more details on restricted VM sizes, see: + https://docs.microsoft.com/azure/aks/quotas-skus-regions. :paramtype vm_size: str :keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. 
:paramtype os_disk_size_gb: int - :keyword os_disk_type: The default is 'Ephemeral' if the VM supports it and has a cache disk - larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed - after creation. For more information see `Ephemeral OS + :keyword os_disk_type: The OS disk type to be used for machines in the agent pool. The default + is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested + OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more + information see `Ephemeral OS `_. Known values are: "Managed" and "Ephemeral". - :paramtype os_disk_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSDiskType + :paramtype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType :keyword kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary". - :paramtype kubelet_disk_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.KubeletDiskType + :paramtype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType :keyword workload_runtime: Determines the type of workload a node can run. Known values are: - "OCIContainer" and "WasmWasi". - :paramtype workload_runtime: str or - ~azure.mgmt.containerservice.v2024_07_01.models.WorkloadRuntime - :keyword vnet_subnet_id: If this is not specified, a VNET and subnet will be generated and - used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to - just nodes. This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + "OCIContainer", "WasmWasi", and "KataVmIsolation". 
+ :paramtype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime + :keyword message_of_the_day: Message of the day for Linux nodes, base64-encoded. A + base64-encoded string which will be written to /etc/motd after decoding. This allows + customization of the message of the day for Linux nodes. It must not be specified for Windows + nodes. It must be a static string (i.e., will be printed raw and not be executed as a script). + :paramtype message_of_the_day: str + :keyword vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will + join on startup. If this is not specified, a VNET and subnet will be generated and used. If no + podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :paramtype vnet_subnet_id: str - :keyword pod_subnet_id: If omitted, pod IPs are statically assigned on the node subnet (see - vnetSubnetID for more details). This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. # pylint: disable=line-too-long + :keyword pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted, + pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is + of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}. :paramtype pod_subnet_id: str + :keyword pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the + agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values + are: "DynamicIndividual" and "StaticBlock". 
+ :paramtype pod_ip_allocation_mode: str or + ~azure.mgmt.containerservice.models.PodIPAllocationMode :keyword max_pods: The maximum number of pods that can run on a node. :paramtype max_pods: int :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType :keyword os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= - 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", - "Windows2019", and "Windows2022". - :paramtype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3", + "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404". + :paramtype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU :keyword max_count: The maximum number of nodes for auto-scaling. :paramtype max_count: int :keyword min_count: The minimum number of nodes for auto-scaling. :paramtype min_count: int :keyword enable_auto_scaling: Whether to enable auto-scaler. :paramtype enable_auto_scaling: bool - :keyword scale_down_mode: This also effects the cluster autoscaler behavior. If not specified, - it defaults to Delete. Known values are: "Delete" and "Deallocate". - :paramtype scale_down_mode: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleDownMode - :keyword type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets" and - "AvailabilitySet". - :paramtype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolType - :keyword mode: A cluster must have at least one 'System' Agent Pool at all times. 
For - additional information on agent pool restrictions and best practices, see: - https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System" and "User". - :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolMode - :keyword orchestrator_version: Both patch version (e.g. 1.20.13) and - (e.g. 1.20) are supported. When is specified, the latest supported - GA patch version is chosen automatically. Updating the cluster with the same once - it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch - version is available. As a best practice, you should upgrade all node pools in an AKS cluster - to the same Kubernetes version. The node pool version must have the same major version as the - control plane. The node pool minor version must be within two minor versions of the control - plane version. The node pool version cannot be greater than the control plane version. For more - information see `upgrading a node pool + :keyword scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also + effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values + are: "Delete" and "Deallocate". + :paramtype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode + :keyword type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets", + "AvailabilitySet", and "VirtualMachines". + :paramtype type: str or ~azure.mgmt.containerservice.models.AgentPoolType + :keyword mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool + at all times. For additional information on agent pool restrictions and best practices, see: + https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and + "Gateway". + :paramtype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode + :keyword orchestrator_version: The version of Kubernetes specified by the user. 
Both patch + version (e.g. 1.20.13) and (e.g. 1.20) are supported. When + is specified, the latest supported GA patch version is chosen automatically. + Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14) + will not trigger an upgrade, even if a newer patch version is available. As a best practice, + you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node + pool version must have the same major version as the control plane. The node pool minor version + must be within two minor versions of the control plane version. The node pool version cannot be + greater than the control plane version. For more information see `upgrading a node pool `_. :paramtype orchestrator_version: str :keyword upgrade_settings: Settings for upgrading the agentpool. - :paramtype upgrade_settings: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeSettings - :keyword power_state: When an Agent Pool is first created it is initially Running. The Agent - Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs - and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and - provisioning state is Succeeded. - :paramtype power_state: ~azure.mgmt.containerservice.v2024_07_01.models.PowerState + :paramtype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings + :keyword power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first + created it is initially Running. The Agent Pool can be stopped by setting this field to + Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An + Agent Pool can only be stopped if it is Running and provisioning state is Succeeded. + :paramtype power_state: ~azure.mgmt.containerservice.models.PowerState :keyword availability_zones: The list of Availability zones to use for nodes. 
This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'. :paramtype availability_zones: list[str] - :keyword enable_node_public_ip: Some scenarios may require nodes in a node pool to receive - their own dedicated public IP addresses. A common scenario is for gaming workloads, where a - console needs to make a direct connection to a cloud virtual machine to minimize hops. For more - information see `assigning a public IP per node - `_. # pylint: disable=line-too-long + :keyword enable_node_public_ip: Whether each node is allocated its own public IP. Some + scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. + A common scenario is for gaming workloads, where a console needs to make a direct connection to + a cloud virtual machine to minimize hops. For more information see `assigning a public IP per + node + `_. The default is false. :paramtype enable_node_public_ip: bool - :keyword node_public_ip_prefix_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. # pylint: disable=line-too-long + :keyword node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from. + This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}. :paramtype node_public_ip_prefix_id: str :keyword scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'. Known values are: "Spot" and "Regular". - :paramtype scale_set_priority: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetPriority - :keyword scale_set_eviction_policy: This cannot be specified unless the scaleSetPriority is - 'Spot'. If not specified, the default is 'Delete'. Known values are: "Delete" and "Deallocate". 
+ :paramtype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority + :keyword scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This + cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is + 'Delete'. Known values are: "Delete" and "Deallocate". :paramtype scale_set_eviction_policy: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ScaleSetEvictionPolicy - :keyword spot_max_price: Possible values are any decimal value greater than zero or -1 which - indicates the willingness to pay any on-demand price. For more details on spot pricing, see - `spot VMs pricing `_. + ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy + :keyword spot_max_price: The max price (in US Dollars) you are willing to pay for spot + instances. Possible values are any decimal value greater than zero or -1 which indicates + default price to be up-to on-demand. Possible values are any decimal value greater than zero or + -1 which indicates the willingness to pay any on-demand price. For more details on spot + pricing, see `spot VMs pricing + `_. :paramtype spot_max_price: float :keyword tags: The tags to be persisted on the agent pool virtual machine scale set. :paramtype tags: dict[str, str] @@ -4338,45 +5041,60 @@ def __init__( # pylint: disable=too-many-locals :keyword proximity_placement_group_id: The ID for Proximity Placement Group. :paramtype proximity_placement_group_id: str :keyword kubelet_config: The Kubelet configuration on the agent pool nodes. - :paramtype kubelet_config: ~azure.mgmt.containerservice.v2024_07_01.models.KubeletConfig + :paramtype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig :keyword linux_os_config: The OS configuration of Linux agent nodes. 
- :paramtype linux_os_config: ~azure.mgmt.containerservice.v2024_07_01.models.LinuxOSConfig - :keyword enable_encryption_at_host: This is only supported on certain VM sizes and in certain - Azure regions. For more information, see: - https://docs.microsoft.com/azure/aks/enable-host-encryption. + :paramtype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig + :keyword enable_encryption_at_host: Whether to enable host based OS and data drive encryption. + This is only supported on certain VM sizes and in certain Azure regions. For more information, + see: https://docs.microsoft.com/azure/aks/enable-host-encryption. :paramtype enable_encryption_at_host: bool :keyword enable_ultra_ssd: Whether to enable UltraSSD. :paramtype enable_ultra_ssd: bool - :keyword enable_fips: See `Add a FIPS-enabled node pool + :keyword enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool `_ for more details. :paramtype enable_fips: bool :keyword gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g". - :paramtype gpu_instance_profile: str or - ~azure.mgmt.containerservice.v2024_07_01.models.GPUInstanceProfile + :paramtype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile :keyword creation_data: CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot. - :paramtype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :paramtype creation_data: ~azure.mgmt.containerservice.models.CreationData :keyword capacity_reservation_group_id: AKS will associate the specified agent pool with the Capacity Reservation Group. 
:paramtype capacity_reservation_group_id: str - :keyword host_group_id: This is of the form: - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. # pylint: disable=line-too-long + :keyword host_group_id: The fully qualified resource ID of the Dedicated Host Group to + provision virtual machines from, used only in creation scenario and not allowed to changed once + set. This is of the form: + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see `Azure dedicated hosts `_. :paramtype host_group_id: str :keyword network_profile: Network-related settings of an agent pool. - :paramtype network_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolNetworkProfile + :paramtype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile :keyword windows_profile: The Windows agent pool's specific profile. - :paramtype windows_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolWindowsProfile + :paramtype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile :keyword security_profile: The security settings of an agent pool. - :paramtype security_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolSecurityProfile - :keyword name: Windows agent pool names must be 6 characters or less. Required. + :paramtype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile + :keyword gpu_profile: GPU settings for the Agent Pool. + :paramtype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile + :keyword gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field + cannot be set if agent pool mode is not Gateway. + :paramtype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile + :keyword virtual_machines_profile: Specifications on VirtualMachines agent pool. 
+ :paramtype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile + :keyword virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool. + :paramtype virtual_machine_nodes_status: + list[~azure.mgmt.containerservice.models.VirtualMachineNodes] + :keyword status: Contains read-only information about the Agent Pool. + :paramtype status: ~azure.mgmt.containerservice.models.AgentPoolStatus + :keyword local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS + overrides. LocalDNS helps improve performance and reliability of DNS resolution in an AKS + cluster. For more details see aka.ms/aks/localdns. + :paramtype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile + :keyword name: Unique name of the agent pool profile in the context of the subscription and + resource group. Windows agent pool names must be 6 characters or less. Required. :paramtype name: str """ super().__init__( @@ -4386,8 +5104,10 @@ def __init__( # pylint: disable=too-many-locals os_disk_type=os_disk_type, kubelet_disk_type=kubelet_disk_type, workload_runtime=workload_runtime, + message_of_the_day=message_of_the_day, vnet_subnet_id=vnet_subnet_id, pod_subnet_id=pod_subnet_id, + pod_ip_allocation_mode=pod_ip_allocation_mode, max_pods=max_pods, os_type=os_type, os_sku=os_sku, @@ -4422,23 +5142,56 @@ def __init__( # pylint: disable=too-many-locals network_profile=network_profile, windows_profile=windows_profile, security_profile=security_profile, + gpu_profile=gpu_profile, + gateway_profile=gateway_profile, + virtual_machines_profile=virtual_machines_profile, + virtual_machine_nodes_status=virtual_machine_nodes_status, + status=status, + local_dns_profile=local_dns_profile, **kwargs ) self.name = name +class ManagedClusterAIToolchainOperatorProfile(_serialization.Model): + """When enabling the operator, a set of AKS managed CRDs and controllers will be installed in the + cluster. 
The operator automates the deployment of OSS models for inference and/or training + purposes. It provides a set of preset models and enables distributed inference against them. + + :ivar enabled: Whether to enable AI toolchain operator to the cluster. Indicates if AI + toolchain operator enabled or not. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Whether to enable AI toolchain operator to the cluster. Indicates if AI + toolchain operator enabled or not. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + class ManagedClusterAPIServerAccessProfile(_serialization.Model): """Access profile for managed cluster API server. - :ivar authorized_ip_ranges: IP ranges are specified in CIDR format, e.g. 137.117.106.88/29. - This feature is not compatible with clusters that use Public IP Per Node, or clusters that are - using a Basic Load Balancer. For more information see `API server authorized IP ranges + :ivar authorized_ip_ranges: The IP ranges authorized to access the Kubernetes API server. IP + ranges are specified in CIDR format, e.g. 137.117.106.88/29. This feature is not compatible + with clusters that use Public IP Per Node, or clusters that are using a Basic Load Balancer. + For more information see `API server authorized IP ranges `_. :vartype authorized_ip_ranges: list[str] - :ivar enable_private_cluster: For more details, see `Creating a private AKS cluster + :ivar enable_private_cluster: Whether to create the cluster as a private cluster or not. For + more details, see `Creating a private AKS cluster `_. :vartype enable_private_cluster: bool - :ivar private_dns_zone: The default is System. For more details see `configure private DNS zone + :ivar private_dns_zone: The private DNS zone mode for the cluster. The default is System. 
For + more details see `configure private DNS zone `_. Allowed values are 'system' and 'none'. :vartype private_dns_zone: str @@ -4447,6 +5200,13 @@ class ManagedClusterAPIServerAccessProfile(_serialization.Model): :vartype enable_private_cluster_public_fqdn: bool :ivar disable_run_command: Whether to disable run command for the cluster or not. :vartype disable_run_command: bool + :ivar enable_vnet_integration: Whether to enable apiserver vnet integration for the cluster or + not. See aka.ms/AksVnetIntegration for more details. + :vartype enable_vnet_integration: bool + :ivar subnet_id: The subnet to be used when apiserver vnet integration is enabled. It is + required when creating a new cluster with BYO Vnet, or when updating an existing cluster to + enable apiserver vnet integration. + :vartype subnet_id: str """ _attribute_map = { @@ -4455,36 +5215,50 @@ class ManagedClusterAPIServerAccessProfile(_serialization.Model): "private_dns_zone": {"key": "privateDNSZone", "type": "str"}, "enable_private_cluster_public_fqdn": {"key": "enablePrivateClusterPublicFQDN", "type": "bool"}, "disable_run_command": {"key": "disableRunCommand", "type": "bool"}, + "enable_vnet_integration": {"key": "enableVnetIntegration", "type": "bool"}, + "subnet_id": {"key": "subnetId", "type": "str"}, } def __init__( self, *, - authorized_ip_ranges: Optional[List[str]] = None, + authorized_ip_ranges: Optional[list[str]] = None, enable_private_cluster: Optional[bool] = None, private_dns_zone: Optional[str] = None, enable_private_cluster_public_fqdn: Optional[bool] = None, disable_run_command: Optional[bool] = None, + enable_vnet_integration: Optional[bool] = None, + subnet_id: Optional[str] = None, **kwargs: Any ) -> None: """ - :keyword authorized_ip_ranges: IP ranges are specified in CIDR format, e.g. 137.117.106.88/29. - This feature is not compatible with clusters that use Public IP Per Node, or clusters that are - using a Basic Load Balancer. 
For more information see `API server authorized IP ranges + :keyword authorized_ip_ranges: The IP ranges authorized to access the Kubernetes API server. IP + ranges are specified in CIDR format, e.g. 137.117.106.88/29. This feature is not compatible + with clusters that use Public IP Per Node, or clusters that are using a Basic Load Balancer. + For more information see `API server authorized IP ranges `_. :paramtype authorized_ip_ranges: list[str] - :keyword enable_private_cluster: For more details, see `Creating a private AKS cluster + :keyword enable_private_cluster: Whether to create the cluster as a private cluster or not. For + more details, see `Creating a private AKS cluster `_. :paramtype enable_private_cluster: bool - :keyword private_dns_zone: The default is System. For more details see `configure private DNS - zone `_. - Allowed values are 'system' and 'none'. + :keyword private_dns_zone: The private DNS zone mode for the cluster. The default is System. + For more details see `configure private DNS zone + `_. Allowed + values are 'system' and 'none'. :paramtype private_dns_zone: str :keyword enable_private_cluster_public_fqdn: Whether to create additional public FQDN for private cluster or not. :paramtype enable_private_cluster_public_fqdn: bool :keyword disable_run_command: Whether to disable run command for the cluster or not. :paramtype disable_run_command: bool + :keyword enable_vnet_integration: Whether to enable apiserver vnet integration for the cluster + or not. See aka.ms/AksVnetIntegration for more details. + :paramtype enable_vnet_integration: bool + :keyword subnet_id: The subnet to be used when apiserver vnet integration is enabled. It is + required when creating a new cluster with BYO Vnet, or when updating an existing cluster to + enable apiserver vnet integration. 
+ :paramtype subnet_id: str """ super().__init__(**kwargs) self.authorized_ip_ranges = authorized_ip_ranges @@ -4492,19 +5266,23 @@ def __init__( self.private_dns_zone = private_dns_zone self.enable_private_cluster_public_fqdn = enable_private_cluster_public_fqdn self.disable_run_command = disable_run_command + self.enable_vnet_integration = enable_vnet_integration + self.subnet_id = subnet_id class ManagedClusterAutoUpgradeProfile(_serialization.Model): """Auto upgrade profile for a managed cluster. - :ivar upgrade_channel: For more information see `setting the AKS cluster auto-upgrade channel + :ivar upgrade_channel: The upgrade channel for auto upgrade. The default is 'none'. For more + information see `setting the AKS cluster auto-upgrade channel `_. Known values are: "rapid", "stable", "patch", "node-image", and "none". - :vartype upgrade_channel: str or ~azure.mgmt.containerservice.v2024_07_01.models.UpgradeChannel - :ivar node_os_upgrade_channel: Manner in which the OS on your nodes is updated. The default is - NodeImage. Known values are: "None", "Unmanaged", "NodeImage", and "SecurityPatch". + :vartype upgrade_channel: str or ~azure.mgmt.containerservice.models.UpgradeChannel + :ivar node_os_upgrade_channel: Node OS Upgrade Channel. Manner in which the OS on your nodes is + updated. The default is NodeImage. Known values are: "None", "Unmanaged", "NodeImage", and + "SecurityPatch". :vartype node_os_upgrade_channel: str or - ~azure.mgmt.containerservice.v2024_07_01.models.NodeOSUpgradeChannel + ~azure.mgmt.containerservice.models.NodeOSUpgradeChannel """ _attribute_map = { @@ -4520,15 +5298,16 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword upgrade_channel: For more information see `setting the AKS cluster auto-upgrade - channel `_. - Known values are: "rapid", "stable", "patch", "node-image", and "none". 
- :paramtype upgrade_channel: str or - ~azure.mgmt.containerservice.v2024_07_01.models.UpgradeChannel - :keyword node_os_upgrade_channel: Manner in which the OS on your nodes is updated. The default - is NodeImage. Known values are: "None", "Unmanaged", "NodeImage", and "SecurityPatch". + :keyword upgrade_channel: The upgrade channel for auto upgrade. The default is 'none'. For more + information see `setting the AKS cluster auto-upgrade channel + `_. Known values + are: "rapid", "stable", "patch", "node-image", and "none". + :paramtype upgrade_channel: str or ~azure.mgmt.containerservice.models.UpgradeChannel + :keyword node_os_upgrade_channel: Node OS Upgrade Channel. Manner in which the OS on your nodes + is updated. The default is NodeImage. Known values are: "None", "Unmanaged", "NodeImage", and + "SecurityPatch". :paramtype node_os_upgrade_channel: str or - ~azure.mgmt.containerservice.v2024_07_01.models.NodeOSUpgradeChannel + ~azure.mgmt.containerservice.models.NodeOSUpgradeChannel """ super().__init__(**kwargs) self.upgrade_channel = upgrade_channel @@ -4542,8 +5321,7 @@ class ManagedClusterAzureMonitorProfile(_serialization.Model): Collect out-of-the-box Kubernetes infrastructure metrics to send to an Azure Monitor Workspace and configure additional scraping for custom targets. See aka.ms/AzureManagedPrometheus for an overview. - :vartype metrics: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfileMetrics + :vartype metrics: ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfileMetrics """ _attribute_map = { @@ -4559,7 +5337,7 @@ def __init__( and configure additional scraping for custom targets. See aka.ms/AzureManagedPrometheus for an overview. 
:paramtype metrics: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfileMetrics + ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfileMetrics """ super().__init__(**kwargs) self.metrics = metrics @@ -4627,7 +5405,7 @@ class ManagedClusterAzureMonitorProfileMetrics(_serialization.Model): These optional settings are for the kube-state-metrics pod that is deployed with the addon. See aka.ms/AzureManagedPrometheus-optional-parameters for details. :vartype kube_state_metrics: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfileKubeStateMetrics + ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfileKubeStateMetrics """ _validation = { @@ -4655,20 +5433,56 @@ def __init__( These optional settings are for the kube-state-metrics pod that is deployed with the addon. See aka.ms/AzureManagedPrometheus-optional-parameters for details. :paramtype kube_state_metrics: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAzureMonitorProfileKubeStateMetrics + ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfileKubeStateMetrics """ super().__init__(**kwargs) self.enabled = enabled self.kube_state_metrics = kube_state_metrics +class ManagedClusterBootstrapProfile(_serialization.Model): + """The bootstrap profile. + + :ivar artifact_source: The artifact source. The source where the artifacts are downloaded from. + Known values are: "Cache" and "Direct". + :vartype artifact_source: str or ~azure.mgmt.containerservice.models.ArtifactSource + :ivar container_registry_id: The resource Id of Azure Container Registry. The registry must + have private network access, premium SKU and zone redundancy. 
+ :vartype container_registry_id: str + """ + + _attribute_map = { + "artifact_source": {"key": "artifactSource", "type": "str"}, + "container_registry_id": {"key": "containerRegistryId", "type": "str"}, + } + + def __init__( + self, + *, + artifact_source: Union[str, "_models.ArtifactSource"] = "Direct", + container_registry_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword artifact_source: The artifact source. The source where the artifacts are downloaded + from. Known values are: "Cache" and "Direct". + :paramtype artifact_source: str or ~azure.mgmt.containerservice.models.ArtifactSource + :keyword container_registry_id: The resource Id of Azure Container Registry. The registry must + have private network access, premium SKU and zone redundancy. + :paramtype container_registry_id: str + """ + super().__init__(**kwargs) + self.artifact_source = artifact_source + self.container_registry_id = container_registry_id + + class ManagedClusterCostAnalysis(_serialization.Model): """The cost analysis configuration for the cluster. - :ivar enabled: The Managed Cluster sku.tier must be set to 'Standard' or 'Premium' to enable - this feature. Enabling this will add Kubernetes Namespace and Deployment details to the Cost - Analysis views in the Azure portal. If not specified, the default is false. For more - information see aka.ms/aks/docs/cost-analysis. + :ivar enabled: Whether to enable cost analysis. The Managed Cluster sku.tier must be set to + 'Standard' or 'Premium' to enable this feature. Enabling this will add Kubernetes Namespace and + Deployment details to the Cost Analysis views in the Azure portal. If not specified, the + default is false. For more information see aka.ms/aks/docs/cost-analysis. 
:vartype enabled: bool """ @@ -4678,10 +5492,10 @@ class ManagedClusterCostAnalysis(_serialization.Model): def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: """ - :keyword enabled: The Managed Cluster sku.tier must be set to 'Standard' or 'Premium' to enable - this feature. Enabling this will add Kubernetes Namespace and Deployment details to the Cost - Analysis views in the Azure portal. If not specified, the default is false. For more - information see aka.ms/aks/docs/cost-analysis. + :keyword enabled: Whether to enable cost analysis. The Managed Cluster sku.tier must be set to + 'Standard' or 'Premium' to enable this feature. Enabling this will add Kubernetes Namespace and + Deployment details to the Cost Analysis views in the Azure portal. If not specified, the + default is false. For more information see aka.ms/aks/docs/cost-analysis. :paramtype enabled: bool """ super().__init__(**kwargs) @@ -4713,7 +5527,7 @@ def __init__( *, http_proxy: Optional[str] = None, https_proxy: Optional[str] = None, - no_proxy: Optional[List[str]] = None, + no_proxy: Optional[list[str]] = None, trusted_ca: Optional[str] = None, **kwargs: Any ) -> None: @@ -4745,19 +5559,20 @@ class ManagedClusterIdentity(_serialization.Model): :ivar tenant_id: The tenant id of the system assigned identity which is used by master components. :vartype tenant_id: str - :ivar type: For more information see `use managed identities in AKS - `_. Known values are: - "SystemAssigned", "UserAssigned", and "None". - :vartype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.ResourceIdentityType + :ivar type: The type of identity used for the managed cluster. For more information see `use + managed identities in AKS `_. Known + values are: "SystemAssigned", "UserAssigned", and "None". + :vartype type: str or ~azure.mgmt.containerservice.models.ResourceIdentityType :ivar delegated_resources: The delegated identity resources assigned to this managed cluster. 
This can only be set by another Azure Resource Provider, and managed cluster only accept one delegated identity resource. Internal use only. - :vartype delegated_resources: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.DelegatedResource] - :ivar user_assigned_identities: The keys must be ARM resource IDs in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. # pylint: disable=line-too-long + :vartype delegated_resources: dict[str, ~azure.mgmt.containerservice.models.DelegatedResource] + :ivar user_assigned_identities: The user identity associated with the managed cluster. This + identity will be used in control plane. Only one user assigned identity is allowed. The keys + must be ARM resource IDs in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. :vartype user_assigned_identities: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedServiceIdentityUserAssignedIdentitiesValue] + ~azure.mgmt.containerservice.models.ManagedServiceIdentityUserAssignedIdentitiesValue] """ _validation = { @@ -4780,30 +5595,32 @@ def __init__( self, *, type: Optional[Union[str, "_models.ResourceIdentityType"]] = None, - delegated_resources: Optional[Dict[str, "_models.DelegatedResource"]] = None, + delegated_resources: Optional[dict[str, "_models.DelegatedResource"]] = None, user_assigned_identities: Optional[ - Dict[str, "_models.ManagedServiceIdentityUserAssignedIdentitiesValue"] + dict[str, "_models.ManagedServiceIdentityUserAssignedIdentitiesValue"] ] = None, **kwargs: Any ) -> None: """ - :keyword type: For more information see `use managed identities in AKS - `_. Known values are: - "SystemAssigned", "UserAssigned", and "None". 
- :paramtype type: str or ~azure.mgmt.containerservice.v2024_07_01.models.ResourceIdentityType + :keyword type: The type of identity used for the managed cluster. For more information see `use + managed identities in AKS `_. Known + values are: "SystemAssigned", "UserAssigned", and "None". + :paramtype type: str or ~azure.mgmt.containerservice.models.ResourceIdentityType :keyword delegated_resources: The delegated identity resources assigned to this managed cluster. This can only be set by another Azure Resource Provider, and managed cluster only accept one delegated identity resource. Internal use only. :paramtype delegated_resources: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.DelegatedResource] - :keyword user_assigned_identities: The keys must be ARM resource IDs in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. # pylint: disable=line-too-long + ~azure.mgmt.containerservice.models.DelegatedResource] + :keyword user_assigned_identities: The user identity associated with the managed cluster. This + identity will be used in control plane. Only one user assigned identity is allowed. The keys + must be ARM resource IDs in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
:paramtype user_assigned_identities: dict[str, - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedServiceIdentityUserAssignedIdentitiesValue] + ~azure.mgmt.containerservice.models.ManagedServiceIdentityUserAssignedIdentitiesValue] """ super().__init__(**kwargs) - self.principal_id = None - self.tenant_id = None + self.principal_id: Optional[str] = None + self.tenant_id: Optional[str] = None self.type = type self.delegated_resources = delegated_resources self.user_assigned_identities = user_assigned_identities @@ -4816,7 +5633,7 @@ class ManagedClusterIngressProfile(_serialization.Model): and onboarding guide for this feature at https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default. :vartype web_app_routing: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIngressProfileWebAppRouting + ~azure.mgmt.containerservice.models.ManagedClusterIngressProfileWebAppRouting """ _attribute_map = { @@ -4831,30 +5648,62 @@ def __init__( overview and onboarding guide for this feature at https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default. :paramtype web_app_routing: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterIngressProfileWebAppRouting + ~azure.mgmt.containerservice.models.ManagedClusterIngressProfileWebAppRouting """ super().__init__(**kwargs) self.web_app_routing = web_app_routing -class ManagedClusterIngressProfileWebAppRouting(_serialization.Model): # pylint: disable=name-too-long - """Application Routing add-on settings for the ingress profile. +class ManagedClusterIngressProfileNginx(_serialization.Model): + """ManagedClusterIngressProfileNginx. - Variables are only populated by the server, and will be ignored when sending a request. + :ivar default_ingress_controller_type: Ingress type for the default NginxIngressController + custom resource. Known values are: "AnnotationControlled", "External", "Internal", and "None". 
+ :vartype default_ingress_controller_type: str or + ~azure.mgmt.containerservice.models.NginxIngressControllerType + """ - :ivar enabled: Whether to enable the Application Routing add-on. - :vartype enabled: bool - :ivar dns_zone_resource_ids: Resource IDs of the DNS zones to be associated with the - Application Routing add-on. Used only when Application Routing add-on is enabled. Public and - private DNS zones can be in different resource groups, but all public DNS zones must be in the - same resource group and all private DNS zones must be in the same resource group. - :vartype dns_zone_resource_ids: list[str] + _attribute_map = { + "default_ingress_controller_type": {"key": "defaultIngressControllerType", "type": "str"}, + } + + def __init__( + self, + *, + default_ingress_controller_type: Optional[Union[str, "_models.NginxIngressControllerType"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword default_ingress_controller_type: Ingress type for the default NginxIngressController + custom resource. Known values are: "AnnotationControlled", "External", "Internal", and "None". + :paramtype default_ingress_controller_type: str or + ~azure.mgmt.containerservice.models.NginxIngressControllerType + """ + super().__init__(**kwargs) + self.default_ingress_controller_type = default_ingress_controller_type + + +class ManagedClusterIngressProfileWebAppRouting(_serialization.Model): # pylint: disable=name-too-long + """Application Routing add-on settings for the ingress profile. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar enabled: Whether to enable the Application Routing add-on. + :vartype enabled: bool + :ivar dns_zone_resource_ids: Resource IDs of the DNS zones to be associated with the + Application Routing add-on. Used only when Application Routing add-on is enabled. 
Public and + private DNS zones can be in different resource groups, but all public DNS zones must be in the + same resource group and all private DNS zones must be in the same resource group. + :vartype dns_zone_resource_ids: list[str] + :ivar nginx: Configuration for the default NginxIngressController. See more at + https://learn.microsoft.com/en-us/azure/aks/app-routing-nginx-configuration#the-default-nginx-ingress-controller. + :vartype nginx: ~azure.mgmt.containerservice.models.ManagedClusterIngressProfileNginx :ivar identity: Managed identity of the Application Routing add-on. This is the identity that should be granted permissions, for example, to manage the associated Azure DNS resource and get certificates from Azure Key Vault. See `this overview of the add-on `_ for more instructions. - :vartype identity: ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity + :vartype identity: ~azure.mgmt.containerservice.models.UserAssignedIdentity """ _validation = { @@ -4865,11 +5714,17 @@ class ManagedClusterIngressProfileWebAppRouting(_serialization.Model): # pylint _attribute_map = { "enabled": {"key": "enabled", "type": "bool"}, "dns_zone_resource_ids": {"key": "dnsZoneResourceIds", "type": "[str]"}, + "nginx": {"key": "nginx", "type": "ManagedClusterIngressProfileNginx"}, "identity": {"key": "identity", "type": "UserAssignedIdentity"}, } def __init__( - self, *, enabled: Optional[bool] = None, dns_zone_resource_ids: Optional[List[str]] = None, **kwargs: Any + self, + *, + enabled: Optional[bool] = None, + dns_zone_resource_ids: Optional[list[str]] = None, + nginx: Optional["_models.ManagedClusterIngressProfileNginx"] = None, + **kwargs: Any ) -> None: """ :keyword enabled: Whether to enable the Application Routing add-on. @@ -4879,11 +5734,15 @@ def __init__( private DNS zones can be in different resource groups, but all public DNS zones must be in the same resource group and all private DNS zones must be in the same resource group. 
:paramtype dns_zone_resource_ids: list[str] + :keyword nginx: Configuration for the default NginxIngressController. See more at + https://learn.microsoft.com/en-us/azure/aks/app-routing-nginx-configuration#the-default-nginx-ingress-controller. + :paramtype nginx: ~azure.mgmt.containerservice.models.ManagedClusterIngressProfileNginx """ super().__init__(**kwargs) self.enabled = enabled self.dns_zone_resource_ids = dns_zone_resource_ids - self.identity = None + self.nginx = nginx + self.identity: Optional["_models.UserAssignedIdentity"] = None class ManagedClusterListResult(_serialization.Model): @@ -4892,7 +5751,7 @@ class ManagedClusterListResult(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of managed clusters. - :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :vartype value: list[~azure.mgmt.containerservice.models.ManagedCluster] :ivar next_link: The URL to get the next set of managed cluster results. :vartype next_link: str """ @@ -4906,32 +5765,33 @@ class ManagedClusterListResult(_serialization.Model): "next_link": {"key": "nextLink", "type": "str"}, } - def __init__(self, *, value: Optional[List["_models.ManagedCluster"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.ManagedCluster"]] = None, **kwargs: Any) -> None: """ :keyword value: The list of managed clusters. - :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :paramtype value: list[~azure.mgmt.containerservice.models.ManagedCluster] """ super().__init__(**kwargs) self.value = value - self.next_link = None + self.next_link: Optional[str] = None class ManagedClusterLoadBalancerProfile(_serialization.Model): """Profile of the managed cluster load balancer. + Variables are only populated by the server, and will be ignored when sending a request. 
+ :ivar managed_outbound_i_ps: Desired managed outbound IPs for the cluster load balancer. :vartype managed_outbound_i_ps: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs + ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs :ivar outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load balancer. :vartype outbound_ip_prefixes: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes + ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes :ivar outbound_i_ps: Desired outbound IP resources for the cluster load balancer. :vartype outbound_i_ps: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPs + ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileOutboundIPs :ivar effective_outbound_i_ps: The effective outbound IP resources of the cluster load balancer. - :vartype effective_outbound_i_ps: - list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :vartype effective_outbound_i_ps: list[~azure.mgmt.containerservice.models.ResourceReference] :ivar allocated_outbound_ports: The desired number of allocated SNAT ports per VM. Allowed values are in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports. @@ -4944,11 +5804,11 @@ class ManagedClusterLoadBalancerProfile(_serialization.Model): :vartype enable_multiple_standard_load_balancers: bool :ivar backend_pool_type: The type of the managed inbound Load Balancer BackendPool. Known values are: "NodeIPConfiguration" and "NodeIP". 
- :vartype backend_pool_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.BackendPoolType + :vartype backend_pool_type: str or ~azure.mgmt.containerservice.models.BackendPoolType """ _validation = { + "effective_outbound_i_ps": {"readonly": True}, "allocated_outbound_ports": {"maximum": 64000, "minimum": 0}, "idle_timeout_in_minutes": {"maximum": 120, "minimum": 4}, } @@ -4976,7 +5836,6 @@ def __init__( managed_outbound_i_ps: Optional["_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs"] = None, outbound_ip_prefixes: Optional["_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes"] = None, outbound_i_ps: Optional["_models.ManagedClusterLoadBalancerProfileOutboundIPs"] = None, - effective_outbound_i_ps: Optional[List["_models.ResourceReference"]] = None, allocated_outbound_ports: int = 0, idle_timeout_in_minutes: int = 30, enable_multiple_standard_load_balancers: Optional[bool] = None, @@ -4986,18 +5845,14 @@ def __init__( """ :keyword managed_outbound_i_ps: Desired managed outbound IPs for the cluster load balancer. :paramtype managed_outbound_i_ps: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs + ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs :keyword outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load balancer. :paramtype outbound_ip_prefixes: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes + ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes :keyword outbound_i_ps: Desired outbound IP resources for the cluster load balancer. :paramtype outbound_i_ps: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPs - :keyword effective_outbound_i_ps: The effective outbound IP resources of the cluster load - balancer. 
- :paramtype effective_outbound_i_ps: - list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileOutboundIPs :keyword allocated_outbound_ports: The desired number of allocated SNAT ports per VM. Allowed values are in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports. @@ -5010,14 +5865,13 @@ def __init__( :paramtype enable_multiple_standard_load_balancers: bool :keyword backend_pool_type: The type of the managed inbound Load Balancer BackendPool. Known values are: "NodeIPConfiguration" and "NodeIP". - :paramtype backend_pool_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.BackendPoolType + :paramtype backend_pool_type: str or ~azure.mgmt.containerservice.models.BackendPoolType """ super().__init__(**kwargs) self.managed_outbound_i_ps = managed_outbound_i_ps self.outbound_ip_prefixes = outbound_ip_prefixes self.outbound_i_ps = outbound_i_ps - self.effective_outbound_i_ps = effective_outbound_i_ps + self.effective_outbound_i_ps: Optional[list["_models.ResourceReference"]] = None self.allocated_outbound_ports = allocated_outbound_ports self.idle_timeout_in_minutes = idle_timeout_in_minutes self.enable_multiple_standard_load_balancers = enable_multiple_standard_load_balancers @@ -5067,8 +5921,7 @@ class ManagedClusterLoadBalancerProfileOutboundIPPrefixes(_serialization.Model): """Desired outbound IP Prefix resources for the cluster load balancer. :ivar public_ip_prefixes: A list of public IP prefix resources. 
- :vartype public_ip_prefixes: - list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :vartype public_ip_prefixes: list[~azure.mgmt.containerservice.models.ResourceReference] """ _attribute_map = { @@ -5076,12 +5929,11 @@ class ManagedClusterLoadBalancerProfileOutboundIPPrefixes(_serialization.Model): } def __init__( - self, *, public_ip_prefixes: Optional[List["_models.ResourceReference"]] = None, **kwargs: Any + self, *, public_ip_prefixes: Optional[list["_models.ResourceReference"]] = None, **kwargs: Any ) -> None: """ :keyword public_ip_prefixes: A list of public IP prefix resources. - :paramtype public_ip_prefixes: - list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :paramtype public_ip_prefixes: list[~azure.mgmt.containerservice.models.ResourceReference] """ super().__init__(**kwargs) self.public_ip_prefixes = public_ip_prefixes @@ -5091,17 +5943,17 @@ class ManagedClusterLoadBalancerProfileOutboundIPs(_serialization.Model): # pyl """Desired outbound IP resources for the cluster load balancer. :ivar public_i_ps: A list of public IP resources. - :vartype public_i_ps: list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :vartype public_i_ps: list[~azure.mgmt.containerservice.models.ResourceReference] """ _attribute_map = { "public_i_ps": {"key": "publicIPs", "type": "[ResourceReference]"}, } - def __init__(self, *, public_i_ps: Optional[List["_models.ResourceReference"]] = None, **kwargs: Any) -> None: + def __init__(self, *, public_i_ps: Optional[list["_models.ResourceReference"]] = None, **kwargs: Any) -> None: """ :keyword public_i_ps: A list of public IP resources. 
- :paramtype public_i_ps: list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :paramtype public_i_ps: list[~azure.mgmt.containerservice.models.ResourceReference] """ super().__init__(**kwargs) self.public_i_ps = public_i_ps @@ -5136,9 +5988,8 @@ def __init__(self, *, count: int = 1, **kwargs: Any) -> None: class ManagedClusterMetricsProfile(_serialization.Model): """The metrics profile for the ManagedCluster. - :ivar cost_analysis: The cost analysis configuration for the cluster. - :vartype cost_analysis: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterCostAnalysis + :ivar cost_analysis: The configuration for detailed per-Kubernetes resource cost analysis. + :vartype cost_analysis: ~azure.mgmt.containerservice.models.ManagedClusterCostAnalysis """ _attribute_map = { @@ -5147,9 +5998,8 @@ class ManagedClusterMetricsProfile(_serialization.Model): def __init__(self, *, cost_analysis: Optional["_models.ManagedClusterCostAnalysis"] = None, **kwargs: Any) -> None: """ - :keyword cost_analysis: The cost analysis configuration for the cluster. - :paramtype cost_analysis: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterCostAnalysis + :keyword cost_analysis: The configuration for detailed per-Kubernetes resource cost analysis. + :paramtype cost_analysis: ~azure.mgmt.containerservice.models.ManagedClusterCostAnalysis """ super().__init__(**kwargs) self.cost_analysis = cost_analysis @@ -5158,19 +6008,21 @@ def __init__(self, *, cost_analysis: Optional["_models.ManagedClusterCostAnalysi class ManagedClusterNATGatewayProfile(_serialization.Model): """Profile of the managed cluster NAT gateway. + Variables are only populated by the server, and will be ignored when sending a request. + :ivar managed_outbound_ip_profile: Profile of the managed outbound IP resources of the cluster NAT gateway. 
:vartype managed_outbound_ip_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterManagedOutboundIPProfile + ~azure.mgmt.containerservice.models.ManagedClusterManagedOutboundIPProfile :ivar effective_outbound_i_ps: The effective outbound IP resources of the cluster NAT gateway. - :vartype effective_outbound_i_ps: - list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + :vartype effective_outbound_i_ps: list[~azure.mgmt.containerservice.models.ResourceReference] :ivar idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values are in the range of 4 to 120 (inclusive). The default value is 4 minutes. :vartype idle_timeout_in_minutes: int """ _validation = { + "effective_outbound_i_ps": {"readonly": True}, "idle_timeout_in_minutes": {"maximum": 120, "minimum": 4}, } @@ -5187,7 +6039,6 @@ def __init__( self, *, managed_outbound_ip_profile: Optional["_models.ManagedClusterManagedOutboundIPProfile"] = None, - effective_outbound_i_ps: Optional[List["_models.ResourceReference"]] = None, idle_timeout_in_minutes: int = 4, **kwargs: Any ) -> None: @@ -5195,21 +6046,90 @@ def __init__( :keyword managed_outbound_ip_profile: Profile of the managed outbound IP resources of the cluster NAT gateway. :paramtype managed_outbound_ip_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterManagedOutboundIPProfile - :keyword effective_outbound_i_ps: The effective outbound IP resources of the cluster NAT - gateway. - :paramtype effective_outbound_i_ps: - list[~azure.mgmt.containerservice.v2024_07_01.models.ResourceReference] + ~azure.mgmt.containerservice.models.ManagedClusterManagedOutboundIPProfile :keyword idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values are in the range of 4 to 120 (inclusive). The default value is 4 minutes. 
:paramtype idle_timeout_in_minutes: int """ super().__init__(**kwargs) self.managed_outbound_ip_profile = managed_outbound_ip_profile - self.effective_outbound_i_ps = effective_outbound_i_ps + self.effective_outbound_i_ps: Optional[list["_models.ResourceReference"]] = None self.idle_timeout_in_minutes = idle_timeout_in_minutes +class ManagedClusterNodeProvisioningProfile(_serialization.Model): + """ManagedClusterNodeProvisioningProfile. + + :ivar mode: The node provisioning mode. If not specified, the default is Manual. Known values + are: "Manual" and "Auto". + :vartype mode: str or ~azure.mgmt.containerservice.models.NodeProvisioningMode + :ivar default_node_pools: The set of default Karpenter NodePools (CRDs) configured for node + provisioning. This field has no effect unless mode is 'Auto'. Warning: Changing this from Auto + to None on an existing cluster will cause the default Karpenter NodePools to be deleted, which + will drain and delete the nodes associated with those pools. It is strongly recommended to not + do this unless there are idle nodes ready to take the pods evicted by that action. If not + specified, the default is Auto. For more information see aka.ms/aks/nap#node-pools. Known + values are: "None" and "Auto". + :vartype default_node_pools: str or + ~azure.mgmt.containerservice.models.NodeProvisioningDefaultNodePools + """ + + _attribute_map = { + "mode": {"key": "mode", "type": "str"}, + "default_node_pools": {"key": "defaultNodePools", "type": "str"}, + } + + def __init__( + self, + *, + mode: Optional[Union[str, "_models.NodeProvisioningMode"]] = None, + default_node_pools: Union[str, "_models.NodeProvisioningDefaultNodePools"] = "Auto", + **kwargs: Any + ) -> None: + """ + :keyword mode: The node provisioning mode. If not specified, the default is Manual. Known + values are: "Manual" and "Auto". 
+ :paramtype mode: str or ~azure.mgmt.containerservice.models.NodeProvisioningMode + :keyword default_node_pools: The set of default Karpenter NodePools (CRDs) configured for node + provisioning. This field has no effect unless mode is 'Auto'. Warning: Changing this from Auto + to None on an existing cluster will cause the default Karpenter NodePools to be deleted, which + will drain and delete the nodes associated with those pools. It is strongly recommended to not + do this unless there are idle nodes ready to take the pods evicted by that action. If not + specified, the default is Auto. For more information see aka.ms/aks/nap#node-pools. Known + values are: "None" and "Auto". + :paramtype default_node_pools: str or + ~azure.mgmt.containerservice.models.NodeProvisioningDefaultNodePools + """ + super().__init__(**kwargs) + self.mode = mode + self.default_node_pools = default_node_pools + + +class ManagedClusterNodeResourceGroupProfile(_serialization.Model): + """Node resource group lockdown profile for a managed cluster. + + :ivar restriction_level: The restriction level applied to the cluster's node resource group. If + not specified, the default is 'Unrestricted'. Known values are: "Unrestricted" and "ReadOnly". + :vartype restriction_level: str or ~azure.mgmt.containerservice.models.RestrictionLevel + """ + + _attribute_map = { + "restriction_level": {"key": "restrictionLevel", "type": "str"}, + } + + def __init__( + self, *, restriction_level: Optional[Union[str, "_models.RestrictionLevel"]] = None, **kwargs: Any + ) -> None: + """ + :keyword restriction_level: The restriction level applied to the cluster's node resource group. + If not specified, the default is 'Unrestricted'. Known values are: "Unrestricted" and + "ReadOnly". 
+ :paramtype restriction_level: str or ~azure.mgmt.containerservice.models.RestrictionLevel + """ + super().__init__(**kwargs) + self.restriction_level = restriction_level + + class ManagedClusterOIDCIssuerProfile(_serialization.Model): """The OIDC issuer profile of the Managed Cluster. @@ -5236,7 +6156,7 @@ def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: :paramtype enabled: bool """ super().__init__(**kwargs) - self.issuer_url = None + self.issuer_url: Optional[str] = None self.enabled = enabled @@ -5254,14 +6174,14 @@ class ManagedClusterPodIdentity(_serialization.Model): :ivar binding_selector: The binding selector to use for the AzureIdentityBinding resource. :vartype binding_selector: str :ivar identity: The user assigned identity details. Required. - :vartype identity: ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity + :vartype identity: ~azure.mgmt.containerservice.models.UserAssignedIdentity :ivar provisioning_state: The current provisioning state of the pod identity. Known values are: "Assigned", "Canceled", "Deleting", "Failed", "Succeeded", and "Updating". :vartype provisioning_state: str or - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningState + ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningState :ivar provisioning_info: :vartype provisioning_info: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningInfo + ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningInfo """ _validation = { @@ -5298,19 +6218,21 @@ def __init__( :keyword binding_selector: The binding selector to use for the AzureIdentityBinding resource. :paramtype binding_selector: str :keyword identity: The user assigned identity details. Required. 
- :paramtype identity: ~azure.mgmt.containerservice.v2024_07_01.models.UserAssignedIdentity + :paramtype identity: ~azure.mgmt.containerservice.models.UserAssignedIdentity """ super().__init__(**kwargs) self.name = name self.namespace = namespace self.binding_selector = binding_selector self.identity = identity - self.provisioning_state = None - self.provisioning_info = None + self.provisioning_state: Optional[Union[str, "_models.ManagedClusterPodIdentityProvisioningState"]] = None + self.provisioning_info: Optional["_models.ManagedClusterPodIdentityProvisioningInfo"] = None class ManagedClusterPodIdentityException(_serialization.Model): - """See `disable AAD Pod Identity for a specific Pod/Application + """A pod identity exception, which allows pods with certain labels to access the Azure Instance + Metadata Service (IMDS) endpoint without being intercepted by the node-managed identity (NMI) + server. See `disable AAD Pod Identity for a specific Pod/Application `_ for more details. @@ -5336,7 +6258,7 @@ class ManagedClusterPodIdentityException(_serialization.Model): "pod_labels": {"key": "podLabels", "type": "{str}"}, } - def __init__(self, *, name: str, namespace: str, pod_labels: Dict[str, str], **kwargs: Any) -> None: + def __init__(self, *, name: str, namespace: str, pod_labels: dict[str, str], **kwargs: Any) -> None: """ :keyword name: The name of the pod identity exception. Required. :paramtype name: str @@ -5352,23 +6274,25 @@ def __init__(self, *, name: str, namespace: str, pod_labels: Dict[str, str], **k class ManagedClusterPodIdentityProfile(_serialization.Model): - """See `use AAD pod identity `_ - for more details on pod identity integration. + """The pod identity profile of the Managed Cluster. See `use AAD pod identity + `_ for more details on pod + identity integration. :ivar enabled: Whether the pod identity addon is enabled. 
:vartype enabled: bool - :ivar allow_network_plugin_kubenet: Running in Kubenet is disabled by default due to the - security related nature of AAD Pod Identity and the risks of IP spoofing. See `using Kubenet - network plugin with AAD Pod Identity - `_ # pylint: disable=line-too-long + :ivar allow_network_plugin_kubenet: Whether pod identity is allowed to run on clusters with + Kubenet networking. Running in Kubenet is disabled by default due to the security related + nature of AAD Pod Identity and the risks of IP spoofing. See `using Kubenet network plugin with + AAD Pod Identity + `_ for more information. :vartype allow_network_plugin_kubenet: bool :ivar user_assigned_identities: The pod identities to use in the cluster. :vartype user_assigned_identities: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentity] + list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentity] :ivar user_assigned_identity_exceptions: The pod identity exceptions to allow. :vartype user_assigned_identity_exceptions: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityException] + list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentityException] """ _attribute_map = { @@ -5386,25 +6310,26 @@ def __init__( *, enabled: Optional[bool] = None, allow_network_plugin_kubenet: Optional[bool] = None, - user_assigned_identities: Optional[List["_models.ManagedClusterPodIdentity"]] = None, - user_assigned_identity_exceptions: Optional[List["_models.ManagedClusterPodIdentityException"]] = None, + user_assigned_identities: Optional[list["_models.ManagedClusterPodIdentity"]] = None, + user_assigned_identity_exceptions: Optional[list["_models.ManagedClusterPodIdentityException"]] = None, **kwargs: Any ) -> None: """ :keyword enabled: Whether the pod identity addon is enabled. 
:paramtype enabled: bool - :keyword allow_network_plugin_kubenet: Running in Kubenet is disabled by default due to the - security related nature of AAD Pod Identity and the risks of IP spoofing. See `using Kubenet - network plugin with AAD Pod Identity - `_ # pylint: disable=line-too-long + :keyword allow_network_plugin_kubenet: Whether pod identity is allowed to run on clusters with + Kubenet networking. Running in Kubenet is disabled by default due to the security related + nature of AAD Pod Identity and the risks of IP spoofing. See `using Kubenet network plugin with + AAD Pod Identity + `_ for more information. :paramtype allow_network_plugin_kubenet: bool :keyword user_assigned_identities: The pod identities to use in the cluster. :paramtype user_assigned_identities: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentity] + list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentity] :keyword user_assigned_identity_exceptions: The pod identity exceptions to allow. :paramtype user_assigned_identity_exceptions: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityException] + list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentityException] """ super().__init__(**kwargs) self.enabled = enabled @@ -5418,7 +6343,7 @@ class ManagedClusterPodIdentityProvisioningError(_serialization.Model): # pylin :ivar error: Details about the error. :vartype error: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningErrorBody + ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningErrorBody """ _attribute_map = { @@ -5431,7 +6356,7 @@ def __init__( """ :keyword error: Details about the error. 
:paramtype error: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningErrorBody + ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningErrorBody """ super().__init__(**kwargs) self.error = error @@ -5451,7 +6376,7 @@ class ManagedClusterPodIdentityProvisioningErrorBody(_serialization.Model): # p :vartype target: str :ivar details: A list of additional details about the error. :vartype details: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningErrorBody] + list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningErrorBody] """ _attribute_map = { @@ -5467,7 +6392,7 @@ def __init__( code: Optional[str] = None, message: Optional[str] = None, target: Optional[str] = None, - details: Optional[List["_models.ManagedClusterPodIdentityProvisioningErrorBody"]] = None, + details: Optional[list["_models.ManagedClusterPodIdentityProvisioningErrorBody"]] = None, **kwargs: Any ) -> None: """ @@ -5482,7 +6407,7 @@ def __init__( :paramtype target: str :keyword details: A list of additional details about the error. :paramtype details: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningErrorBody] + list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningErrorBody] """ super().__init__(**kwargs) self.code = code @@ -5495,8 +6420,7 @@ class ManagedClusterPodIdentityProvisioningInfo(_serialization.Model): # pylint """ManagedClusterPodIdentityProvisioningInfo. :ivar error: Pod identity assignment error (if any). - :vartype error: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningError + :vartype error: ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningError """ _attribute_map = { @@ -5509,7 +6433,7 @@ def __init__( """ :keyword error: Pod identity assignment error (if any). 
:paramtype error: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPodIdentityProvisioningError + ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningError """ super().__init__(**kwargs) self.error = error @@ -5526,10 +6450,10 @@ class ManagedClusterPoolUpgradeProfile(_serialization.Model): :vartype name: str :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType :ivar upgrades: List of orchestrator types and versions available for upgrade. :vartype upgrades: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfileUpgradesItem] + list[~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfileUpgradesItem] """ _validation = { @@ -5550,7 +6474,7 @@ def __init__( kubernetes_version: str, os_type: Union[str, "_models.OSType"] = "Linux", name: Optional[str] = None, - upgrades: Optional[List["_models.ManagedClusterPoolUpgradeProfileUpgradesItem"]] = None, + upgrades: Optional[list["_models.ManagedClusterPoolUpgradeProfileUpgradesItem"]] = None, **kwargs: Any ) -> None: """ @@ -5560,10 +6484,10 @@ def __init__( :paramtype name: str :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :paramtype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType :keyword upgrades: List of orchestrator types and versions available for upgrade. 
:paramtype upgrades: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfileUpgradesItem] + list[~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfileUpgradesItem] """ super().__init__(**kwargs) self.kubernetes_version = kubernetes_version @@ -5600,68 +6524,88 @@ def __init__( self.is_preview = is_preview -class ManagedClusterPropertiesAutoScalerProfile( - _serialization.Model -): # pylint: disable=too-many-instance-attributes,name-too-long +class ManagedClusterPropertiesAutoScalerProfile(_serialization.Model): # pylint: disable=name-too-long """Parameters to be applied to the cluster-autoscaler when enabled. - :ivar balance_similar_node_groups: Valid values are 'true' and 'false'. + :ivar balance_similar_node_groups: Detects similar node pools and balances the number of nodes + between them. Valid values are 'true' and 'false'. :vartype balance_similar_node_groups: str - :ivar daemonset_eviction_for_empty_nodes: If set to true, all daemonset pods on empty nodes - will be evicted before deletion of the node. If the daemonset pod cannot be evicted another - node will be chosen for scaling. If set to false, the node will be deleted without ensuring - that daemonset pods are deleted or evicted. + :ivar daemonset_eviction_for_empty_nodes: DaemonSet pods will be gracefully terminated from + empty nodes. If set to true, all daemonset pods on empty nodes will be evicted before deletion + of the node. If the daemonset pod cannot be evicted another node will be chosen for scaling. If + set to false, the node will be deleted without ensuring that daemonset pods are deleted or + evicted. :vartype daemonset_eviction_for_empty_nodes: bool - :ivar daemonset_eviction_for_occupied_nodes: If set to true, all daemonset pods on occupied - nodes will be evicted before deletion of the node. If the daemonset pod cannot be evicted - another node will be chosen for scaling. 
If set to false, the node will be deleted without - ensuring that daemonset pods are deleted or evicted. + :ivar daemonset_eviction_for_occupied_nodes: DaemonSet pods will be gracefully terminated from + non-empty nodes. If set to true, all daemonset pods on occupied nodes will be evicted before + deletion of the node. If the daemonset pod cannot be evicted another node will be chosen for + scaling. If set to false, the node will be deleted without ensuring that daemonset pods are + deleted or evicted. :vartype daemonset_eviction_for_occupied_nodes: bool - :ivar ignore_daemonsets_utilization: If set to true, the resources used by daemonset will be - taken into account when making scaling down decisions. + :ivar ignore_daemonsets_utilization: Should CA ignore DaemonSet pods when calculating resource + utilization for scaling down. If set to true, the resources used by daemonset will be taken + into account when making scaling down decisions. :vartype ignore_daemonsets_utilization: bool - :ivar expander: If not specified, the default is 'random'. See `expanders + :ivar expander: The expander to use when scaling up. If not specified, the default is 'random'. + See `expanders `_ for more information. Known values are: "least-waste", "most-pods", "priority", and "random". - :vartype expander: str or ~azure.mgmt.containerservice.v2024_07_01.models.Expander - :ivar max_empty_bulk_delete: The default is 10. + :vartype expander: str or ~azure.mgmt.containerservice.models.Expander + :ivar max_empty_bulk_delete: The maximum number of empty nodes that can be deleted at the same + time. This must be a positive integer. The default is 10. :vartype max_empty_bulk_delete: str - :ivar max_graceful_termination_sec: The default is 600. + :ivar max_graceful_termination_sec: The maximum number of seconds the cluster autoscaler waits + for pod termination when trying to scale down a node. The default is 600. 
:vartype max_graceful_termination_sec: str - :ivar max_node_provision_time: The default is '15m'. Values must be an integer followed by an - 'm'. No unit of time other than minutes (m) is supported. + :ivar max_node_provision_time: The maximum time the autoscaler waits for a node to be + provisioned. The default is '15m'. Values must be an integer followed by an 'm'. No unit of + time other than minutes (m) is supported. :vartype max_node_provision_time: str - :ivar max_total_unready_percentage: The default is 45. The maximum is 100 and the minimum is 0. + :ivar max_total_unready_percentage: The maximum percentage of unready nodes in the cluster. + After this percentage is exceeded, cluster autoscaler halts operations. The default is 45. The + maximum is 100 and the minimum is 0. :vartype max_total_unready_percentage: str - :ivar new_pod_scale_up_delay: For scenarios like burst/batch scale where you don't want CA to - act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore - unscheduled pods before they're a certain age. The default is '0s'. Values must be an integer - followed by a unit ('s' for seconds, 'm' for minutes, 'h' for hours, etc). + :ivar new_pod_scale_up_delay: Ignore unscheduled pods before they're a certain age. For + scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler + could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a + certain age. The default is '0s'. Values must be an integer followed by a unit ('s' for + seconds, 'm' for minutes, 'h' for hours, etc). :vartype new_pod_scale_up_delay: str - :ivar ok_total_unready_count: This must be an integer. The default is 3. + :ivar ok_total_unready_count: The number of allowed unready nodes, irrespective of + max-total-unready-percentage. This must be an integer. The default is 3. :vartype ok_total_unready_count: str - :ivar scan_interval: The default is '10'. 
Values must be an integer number of seconds. + :ivar scan_interval: How often cluster is reevaluated for scale up or down. The default is + '10'. Values must be an integer number of seconds. :vartype scan_interval: str - :ivar scale_down_delay_after_add: The default is '10m'. Values must be an integer followed by - an 'm'. No unit of time other than minutes (m) is supported. + :ivar scale_down_delay_after_add: How long after scale up that scale down evaluation resumes. + The default is '10m'. Values must be an integer followed by an 'm'. No unit of time other than + minutes (m) is supported. :vartype scale_down_delay_after_add: str - :ivar scale_down_delay_after_delete: The default is the scan-interval. Values must be an - integer followed by an 'm'. No unit of time other than minutes (m) is supported. + :ivar scale_down_delay_after_delete: How long after node deletion that scale down evaluation + resumes. The default is the scan-interval. Values must be an integer followed by an 'm'. No + unit of time other than minutes (m) is supported. :vartype scale_down_delay_after_delete: str - :ivar scale_down_delay_after_failure: The default is '3m'. Values must be an integer followed - by an 'm'. No unit of time other than minutes (m) is supported. + :ivar scale_down_delay_after_failure: How long after scale down failure that scale down + evaluation resumes. The default is '3m'. Values must be an integer followed by an 'm'. No unit + of time other than minutes (m) is supported. :vartype scale_down_delay_after_failure: str - :ivar scale_down_unneeded_time: The default is '10m'. Values must be an integer followed by an - 'm'. No unit of time other than minutes (m) is supported. + :ivar scale_down_unneeded_time: How long a node should be unneeded before it is eligible for + scale down. The default is '10m'. Values must be an integer followed by an 'm'. No unit of time + other than minutes (m) is supported. 
:vartype scale_down_unneeded_time: str - :ivar scale_down_unready_time: The default is '20m'. Values must be an integer followed by an - 'm'. No unit of time other than minutes (m) is supported. + :ivar scale_down_unready_time: How long an unready node should be unneeded before it is + eligible for scale down. The default is '20m'. Values must be an integer followed by an 'm'. No + unit of time other than minutes (m) is supported. :vartype scale_down_unready_time: str - :ivar scale_down_utilization_threshold: The default is '0.5'. + :ivar scale_down_utilization_threshold: Node utilization level, defined as sum of requested + resources divided by capacity, below which a node can be considered for scale down. The default + is '0.5'. :vartype scale_down_utilization_threshold: str - :ivar skip_nodes_with_local_storage: The default is true. + :ivar skip_nodes_with_local_storage: If cluster autoscaler will skip deleting nodes with pods + with local storage, for example, EmptyDir or HostPath. The default is true. :vartype skip_nodes_with_local_storage: str - :ivar skip_nodes_with_system_pods: The default is true. + :ivar skip_nodes_with_system_pods: If cluster autoscaler will skip deleting nodes with pods + from kube-system (except for DaemonSet or mirror pods). The default is true. :vartype skip_nodes_with_system_pods: str """ @@ -5714,64 +6658,85 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword balance_similar_node_groups: Valid values are 'true' and 'false'. + :keyword balance_similar_node_groups: Detects similar node pools and balances the number of + nodes between them. Valid values are 'true' and 'false'. :paramtype balance_similar_node_groups: str - :keyword daemonset_eviction_for_empty_nodes: If set to true, all daemonset pods on empty nodes - will be evicted before deletion of the node. If the daemonset pod cannot be evicted another - node will be chosen for scaling. 
If set to false, the node will be deleted without ensuring - that daemonset pods are deleted or evicted. + :keyword daemonset_eviction_for_empty_nodes: DaemonSet pods will be gracefully terminated from + empty nodes. If set to true, all daemonset pods on empty nodes will be evicted before deletion + of the node. If the daemonset pod cannot be evicted another node will be chosen for scaling. If + set to false, the node will be deleted without ensuring that daemonset pods are deleted or + evicted. :paramtype daemonset_eviction_for_empty_nodes: bool - :keyword daemonset_eviction_for_occupied_nodes: If set to true, all daemonset pods on occupied - nodes will be evicted before deletion of the node. If the daemonset pod cannot be evicted - another node will be chosen for scaling. If set to false, the node will be deleted without - ensuring that daemonset pods are deleted or evicted. + :keyword daemonset_eviction_for_occupied_nodes: DaemonSet pods will be gracefully terminated + from non-empty nodes. If set to true, all daemonset pods on occupied nodes will be evicted + before deletion of the node. If the daemonset pod cannot be evicted another node will be chosen + for scaling. If set to false, the node will be deleted without ensuring that daemonset pods are + deleted or evicted. :paramtype daemonset_eviction_for_occupied_nodes: bool - :keyword ignore_daemonsets_utilization: If set to true, the resources used by daemonset will be + :keyword ignore_daemonsets_utilization: Should CA ignore DaemonSet pods when calculating + resource utilization for scaling down. If set to true, the resources used by daemonset will be taken into account when making scaling down decisions. :paramtype ignore_daemonsets_utilization: bool - :keyword expander: If not specified, the default is 'random'. See `expanders + :keyword expander: The expander to use when scaling up. If not specified, the default is + 'random'. See `expanders `_ for more information. 
Known values are: "least-waste", "most-pods", "priority", and "random". - :paramtype expander: str or ~azure.mgmt.containerservice.v2024_07_01.models.Expander - :keyword max_empty_bulk_delete: The default is 10. + :paramtype expander: str or ~azure.mgmt.containerservice.models.Expander + :keyword max_empty_bulk_delete: The maximum number of empty nodes that can be deleted at the + same time. This must be a positive integer. The default is 10. :paramtype max_empty_bulk_delete: str - :keyword max_graceful_termination_sec: The default is 600. + :keyword max_graceful_termination_sec: The maximum number of seconds the cluster autoscaler + waits for pod termination when trying to scale down a node. The default is 600. :paramtype max_graceful_termination_sec: str - :keyword max_node_provision_time: The default is '15m'. Values must be an integer followed by - an 'm'. No unit of time other than minutes (m) is supported. + :keyword max_node_provision_time: The maximum time the autoscaler waits for a node to be + provisioned. The default is '15m'. Values must be an integer followed by an 'm'. No unit of + time other than minutes (m) is supported. :paramtype max_node_provision_time: str - :keyword max_total_unready_percentage: The default is 45. The maximum is 100 and the minimum is - 0. + :keyword max_total_unready_percentage: The maximum percentage of unready nodes in the cluster. + After this percentage is exceeded, cluster autoscaler halts operations. The default is 45. The + maximum is 100 and the minimum is 0. :paramtype max_total_unready_percentage: str - :keyword new_pod_scale_up_delay: For scenarios like burst/batch scale where you don't want CA - to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore - unscheduled pods before they're a certain age. The default is '0s'. Values must be an integer - followed by a unit ('s' for seconds, 'm' for minutes, 'h' for hours, etc). 
+ :keyword new_pod_scale_up_delay: Ignore unscheduled pods before they're a certain age. For + scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler + could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a + certain age. The default is '0s'. Values must be an integer followed by a unit ('s' for + seconds, 'm' for minutes, 'h' for hours, etc). :paramtype new_pod_scale_up_delay: str - :keyword ok_total_unready_count: This must be an integer. The default is 3. + :keyword ok_total_unready_count: The number of allowed unready nodes, irrespective of + max-total-unready-percentage. This must be an integer. The default is 3. :paramtype ok_total_unready_count: str - :keyword scan_interval: The default is '10'. Values must be an integer number of seconds. + :keyword scan_interval: How often cluster is reevaluated for scale up or down. The default is + '10'. Values must be an integer number of seconds. :paramtype scan_interval: str - :keyword scale_down_delay_after_add: The default is '10m'. Values must be an integer followed - by an 'm'. No unit of time other than minutes (m) is supported. + :keyword scale_down_delay_after_add: How long after scale up that scale down evaluation + resumes. The default is '10m'. Values must be an integer followed by an 'm'. No unit of time + other than minutes (m) is supported. :paramtype scale_down_delay_after_add: str - :keyword scale_down_delay_after_delete: The default is the scan-interval. Values must be an - integer followed by an 'm'. No unit of time other than minutes (m) is supported. + :keyword scale_down_delay_after_delete: How long after node deletion that scale down evaluation + resumes. The default is the scan-interval. Values must be an integer followed by an 'm'. No + unit of time other than minutes (m) is supported. :paramtype scale_down_delay_after_delete: str - :keyword scale_down_delay_after_failure: The default is '3m'. 
Values must be an integer - followed by an 'm'. No unit of time other than minutes (m) is supported. + :keyword scale_down_delay_after_failure: How long after scale down failure that scale down + evaluation resumes. The default is '3m'. Values must be an integer followed by an 'm'. No unit + of time other than minutes (m) is supported. :paramtype scale_down_delay_after_failure: str - :keyword scale_down_unneeded_time: The default is '10m'. Values must be an integer followed by - an 'm'. No unit of time other than minutes (m) is supported. + :keyword scale_down_unneeded_time: How long a node should be unneeded before it is eligible for + scale down. The default is '10m'. Values must be an integer followed by an 'm'. No unit of time + other than minutes (m) is supported. :paramtype scale_down_unneeded_time: str - :keyword scale_down_unready_time: The default is '20m'. Values must be an integer followed by - an 'm'. No unit of time other than minutes (m) is supported. + :keyword scale_down_unready_time: How long an unready node should be unneeded before it is + eligible for scale down. The default is '20m'. Values must be an integer followed by an 'm'. No + unit of time other than minutes (m) is supported. :paramtype scale_down_unready_time: str - :keyword scale_down_utilization_threshold: The default is '0.5'. + :keyword scale_down_utilization_threshold: Node utilization level, defined as sum of requested + resources divided by capacity, below which a node can be considered for scale down. The default + is '0.5'. :paramtype scale_down_utilization_threshold: str - :keyword skip_nodes_with_local_storage: The default is true. + :keyword skip_nodes_with_local_storage: If cluster autoscaler will skip deleting nodes with + pods with local storage, for example, EmptyDir or HostPath. The default is true. :paramtype skip_nodes_with_local_storage: str - :keyword skip_nodes_with_system_pods: The default is true. 
+ :keyword skip_nodes_with_system_pods: If cluster autoscaler will skip deleting nodes with pods + from kube-system (except for DaemonSet or mirror pods). The default is true. :paramtype skip_nodes_with_system_pods: str """ super().__init__(**kwargs) @@ -5801,27 +6766,35 @@ class ManagedClusterSecurityProfile(_serialization.Model): """Security profile for the container service cluster. :ivar defender: Microsoft Defender settings for the security profile. - :vartype defender: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileDefender + :vartype defender: ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileDefender :ivar azure_key_vault_kms: Azure Key Vault `key management service `_ settings for the security profile. - :vartype azure_key_vault_kms: ~azure.mgmt.containerservice.v2024_07_01.models.AzureKeyVaultKms + :vartype azure_key_vault_kms: ~azure.mgmt.containerservice.models.AzureKeyVaultKms :ivar workload_identity: Workload identity settings for the security profile. Workload identity enables Kubernetes applications to access Azure cloud resources securely with Azure AD. See https://aka.ms/aks/wi for more details. :vartype workload_identity: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileWorkloadIdentity + ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileWorkloadIdentity :ivar image_cleaner: Image Cleaner settings for the security profile. :vartype image_cleaner: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileImageCleaner + ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileImageCleaner + :ivar custom_ca_trust_certificates: A list of up to 10 base64 encoded CAs that will be added to + the trust store on all nodes in the cluster. For more information see `Custom CA Trust + Certificates `_. 
+ :vartype custom_ca_trust_certificates: list[bytes] """ + _validation = { + "custom_ca_trust_certificates": {"max_items": 10, "min_items": 0}, + } + _attribute_map = { "defender": {"key": "defender", "type": "ManagedClusterSecurityProfileDefender"}, "azure_key_vault_kms": {"key": "azureKeyVaultKms", "type": "AzureKeyVaultKms"}, "workload_identity": {"key": "workloadIdentity", "type": "ManagedClusterSecurityProfileWorkloadIdentity"}, "image_cleaner": {"key": "imageCleaner", "type": "ManagedClusterSecurityProfileImageCleaner"}, + "custom_ca_trust_certificates": {"key": "customCATrustCertificates", "type": "[bytearray]"}, } def __init__( @@ -5831,31 +6804,35 @@ def __init__( azure_key_vault_kms: Optional["_models.AzureKeyVaultKms"] = None, workload_identity: Optional["_models.ManagedClusterSecurityProfileWorkloadIdentity"] = None, image_cleaner: Optional["_models.ManagedClusterSecurityProfileImageCleaner"] = None, + custom_ca_trust_certificates: Optional[list[bytes]] = None, **kwargs: Any ) -> None: """ :keyword defender: Microsoft Defender settings for the security profile. - :paramtype defender: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileDefender + :paramtype defender: ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileDefender :keyword azure_key_vault_kms: Azure Key Vault `key management service `_ settings for the security profile. - :paramtype azure_key_vault_kms: - ~azure.mgmt.containerservice.v2024_07_01.models.AzureKeyVaultKms + :paramtype azure_key_vault_kms: ~azure.mgmt.containerservice.models.AzureKeyVaultKms :keyword workload_identity: Workload identity settings for the security profile. Workload identity enables Kubernetes applications to access Azure cloud resources securely with Azure AD. See https://aka.ms/aks/wi for more details. 
:paramtype workload_identity: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileWorkloadIdentity + ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileWorkloadIdentity :keyword image_cleaner: Image Cleaner settings for the security profile. :paramtype image_cleaner: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileImageCleaner + ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileImageCleaner + :keyword custom_ca_trust_certificates: A list of up to 10 base64 encoded CAs that will be added + to the trust store on all nodes in the cluster. For more information see `Custom CA Trust + Certificates `_. + :paramtype custom_ca_trust_certificates: list[bytes] """ super().__init__(**kwargs) self.defender = defender self.azure_key_vault_kms = azure_key_vault_kms self.workload_identity = workload_identity self.image_cleaner = image_cleaner + self.custom_ca_trust_certificates = custom_ca_trust_certificates class ManagedClusterSecurityProfileDefender(_serialization.Model): @@ -5869,7 +6846,7 @@ class ManagedClusterSecurityProfileDefender(_serialization.Model): :ivar security_monitoring: Microsoft Defender threat detection for Cloud settings for the security profile. :vartype security_monitoring: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring + ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring """ _attribute_map = { @@ -5896,7 +6873,7 @@ def __init__( :keyword security_monitoring: Microsoft Defender threat detection for Cloud settings for the security profile. 
:paramtype security_monitoring: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring + ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring """ super().__init__(**kwargs) self.log_analytics_workspace_resource_id = log_analytics_workspace_resource_id @@ -6006,12 +6983,12 @@ def __init__(self, *, client_id: str, secret: Optional[str] = None, **kwargs: An class ManagedClusterSKU(_serialization.Model): """The SKU of a Managed Cluster. - :ivar name: The name of a managed cluster SKU. "Base" - :vartype name: str or ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKUName - :ivar tier: If not specified, the default is 'Free'. See `AKS Pricing Tier - `_ for more details. Known - values are: "Premium", "Standard", and "Free". - :vartype tier: str or ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKUTier + :ivar name: The name of a managed cluster SKU. Known values are: "Base" and "Automatic". + :vartype name: str or ~azure.mgmt.containerservice.models.ManagedClusterSKUName + :ivar tier: The tier of a managed cluster SKU. If not specified, the default is 'Free'. See + `AKS Pricing Tier `_ for + more details. Known values are: "Premium", "Standard", and "Free". + :vartype tier: str or ~azure.mgmt.containerservice.models.ManagedClusterSKUTier """ _attribute_map = { @@ -6027,33 +7004,79 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword name: The name of a managed cluster SKU. "Base" - :paramtype name: str or ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKUName - :keyword tier: If not specified, the default is 'Free'. See `AKS Pricing Tier - `_ for more details. Known - values are: "Premium", "Standard", and "Free". - :paramtype tier: str or ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterSKUTier + :keyword name: The name of a managed cluster SKU. Known values are: "Base" and "Automatic". 
+ :paramtype name: str or ~azure.mgmt.containerservice.models.ManagedClusterSKUName + :keyword tier: The tier of a managed cluster SKU. If not specified, the default is 'Free'. See + `AKS Pricing Tier `_ for + more details. Known values are: "Premium", "Standard", and "Free". + :paramtype tier: str or ~azure.mgmt.containerservice.models.ManagedClusterSKUTier """ super().__init__(**kwargs) self.name = name self.tier = tier +class ManagedClusterStaticEgressGatewayProfile(_serialization.Model): + """The Static Egress Gateway addon configuration for the cluster. + + :ivar enabled: Enable Static Egress Gateway addon. Indicates if Static Egress Gateway addon is + enabled or not. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "enabled", "type": "bool"}, + } + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Enable Static Egress Gateway addon. Indicates if Static Egress Gateway addon + is enabled or not. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class ManagedClusterStatus(_serialization.Model): + """Contains read-only information about the Managed Cluster. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar provisioning_error: The error details information of the managed cluster. Preserves the + detailed info of failure. If there was no error, this field is omitted. + :vartype provisioning_error: ~azure.mgmt.containerservice.models.ErrorDetail + """ + + _validation = { + "provisioning_error": {"readonly": True}, + } + + _attribute_map = { + "provisioning_error": {"key": "provisioningError", "type": "ErrorDetail"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.provisioning_error: Optional["_models.ErrorDetail"] = None + + class ManagedClusterStorageProfile(_serialization.Model): """Storage profile for the container service cluster. 
:ivar disk_csi_driver: AzureDisk CSI Driver settings for the storage profile. :vartype disk_csi_driver: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileDiskCSIDriver + ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileDiskCSIDriver :ivar file_csi_driver: AzureFile CSI Driver settings for the storage profile. :vartype file_csi_driver: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileFileCSIDriver + ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileFileCSIDriver :ivar snapshot_controller: Snapshot Controller settings for the storage profile. :vartype snapshot_controller: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileSnapshotController + ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileSnapshotController :ivar blob_csi_driver: AzureBlob CSI Driver settings for the storage profile. :vartype blob_csi_driver: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileBlobCSIDriver + ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileBlobCSIDriver """ _attribute_map = { @@ -6075,16 +7098,16 @@ def __init__( """ :keyword disk_csi_driver: AzureDisk CSI Driver settings for the storage profile. :paramtype disk_csi_driver: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileDiskCSIDriver + ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileDiskCSIDriver :keyword file_csi_driver: AzureFile CSI Driver settings for the storage profile. :paramtype file_csi_driver: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileFileCSIDriver + ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileFileCSIDriver :keyword snapshot_controller: Snapshot Controller settings for the storage profile. 
:paramtype snapshot_controller: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileSnapshotController + ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileSnapshotController :keyword blob_csi_driver: AzureBlob CSI Driver settings for the storage profile. :paramtype blob_csi_driver: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterStorageProfileBlobCSIDriver + ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileBlobCSIDriver """ super().__init__(**kwargs) self.disk_csi_driver = disk_csi_driver @@ -6189,10 +7212,10 @@ class ManagedClusterUpgradeProfile(_serialization.Model): :ivar control_plane_profile: The list of available upgrade versions for the control plane. Required. :vartype control_plane_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfile + ~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile :ivar agent_pool_profiles: The list of available upgrade versions for agent pools. Required. :vartype agent_pool_profiles: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfile] + list[~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile] """ _validation = { @@ -6215,22 +7238,22 @@ def __init__( self, *, control_plane_profile: "_models.ManagedClusterPoolUpgradeProfile", - agent_pool_profiles: List["_models.ManagedClusterPoolUpgradeProfile"], + agent_pool_profiles: list["_models.ManagedClusterPoolUpgradeProfile"], **kwargs: Any ) -> None: """ :keyword control_plane_profile: The list of available upgrade versions for the control plane. Required. :paramtype control_plane_profile: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfile + ~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile :keyword agent_pool_profiles: The list of available upgrade versions for agent pools. Required. 
:paramtype agent_pool_profiles: - list[~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterPoolUpgradeProfile] + list[~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile] """ super().__init__(**kwargs) - self.id = None - self.name = None - self.type = None + self.id: Optional[str] = None + self.name: Optional[str] = None + self.type: Optional[str] = None self.control_plane_profile = control_plane_profile self.agent_pool_profiles = agent_pool_profiles @@ -6240,31 +7263,31 @@ class ManagedClusterWindowsProfile(_serialization.Model): All required parameters must be populated in order to send to server. - :ivar admin_username: Specifies the name of the administrator account. :code:`
`:code:`
` - **Restriction:** Cannot end in "." :code:`
`:code:`
` **Disallowed values:** - "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", - "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", - "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", - "test3", "user4", "user5". :code:`
`:code:`
` **Minimum-length:** 1 character - :code:`
`:code:`
` **Max-length:** 20 characters. Required. + :ivar admin_username: Specifies the name of the administrator account. :code:`
`\\ + :code:`
` **Restriction:** Cannot end in "." :code:`
`\\ :code:`
` **Disallowed + values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", + "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", + "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", + "test2", "test3", "user4", "user5". :code:`
`\\ :code:`
` **Minimum-length:** 1 character + :code:`
`\\ :code:`
` **Max-length:** 20 characters. Required. :vartype admin_username: str - :ivar admin_password: Specifies the password of the administrator account. - :code:`
`:code:`
` **Minimum-length:** 8 characters :code:`
`:code:`
` - **Max-length:** 123 characters :code:`
`:code:`
` **Complexity requirements:** 3 out of 4 - conditions below need to be fulfilled :code:`
` Has lower characters :code:`
`Has upper - characters :code:`
` Has a digit :code:`
` Has a special character (Regex match [\\W_]) - :code:`
`:code:`
` **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", + :ivar admin_password: Specifies the password of the administrator account. :code:`
`\\ + :code:`
` **Minimum-length:** 8 characters :code:`
`\\ :code:`
` **Max-length:** 123 + characters :code:`
`\\ :code:`
` **Complexity requirements:** 3 out of 4 conditions below + need to be fulfilled :code:`
` Has lower characters :code:`
`Has upper characters + :code:`
` Has a digit :code:`
` Has a special character (Regex match [\\W_]) + :code:`
`\\ :code:`
` **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!". :vartype admin_password: str :ivar license_type: The license type to use for Windows VMs. See `Azure Hybrid User Benefits `_ for more details. Known values are: "None" and "Windows_Server". - :vartype license_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.LicenseType - :ivar enable_csi_proxy: For more details on CSI proxy, see the `CSI proxy GitHub repo - `_. + :vartype license_type: str or ~azure.mgmt.containerservice.models.LicenseType + :ivar enable_csi_proxy: Whether to enable CSI proxy. For more details on CSI proxy, see the + `CSI proxy GitHub repo `_. :vartype enable_csi_proxy: bool :ivar gmsa_profile: The Windows gMSA Profile in the Managed Cluster. - :vartype gmsa_profile: ~azure.mgmt.containerservice.v2024_07_01.models.WindowsGmsaProfile + :vartype gmsa_profile: ~azure.mgmt.containerservice.models.WindowsGmsaProfile """ _validation = { @@ -6290,31 +7313,31 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword admin_username: Specifies the name of the administrator account. - :code:`
`:code:`
` **Restriction:** Cannot end in "." :code:`
`:code:`
` - **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", - "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", - "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", - "sys", "test2", "test3", "user4", "user5". :code:`
`:code:`
` **Minimum-length:** 1 - character :code:`
`:code:`
` **Max-length:** 20 characters. Required. + :keyword admin_username: Specifies the name of the administrator account. :code:`
`\\ + :code:`
` **Restriction:** Cannot end in "." :code:`
`\\ :code:`
` **Disallowed + values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", + "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", + "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", + "test2", "test3", "user4", "user5". :code:`
`\\ :code:`
` **Minimum-length:** 1 character + :code:`
`\\ :code:`
` **Max-length:** 20 characters. Required. :paramtype admin_username: str - :keyword admin_password: Specifies the password of the administrator account. - :code:`
`:code:`
` **Minimum-length:** 8 characters :code:`
`:code:`
` - **Max-length:** 123 characters :code:`
`:code:`
` **Complexity requirements:** 3 out of 4 - conditions below need to be fulfilled :code:`
` Has lower characters :code:`
`Has upper - characters :code:`
` Has a digit :code:`
` Has a special character (Regex match [\\W_]) - :code:`
`:code:`
` **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", + :keyword admin_password: Specifies the password of the administrator account. :code:`
`\\ + :code:`
` **Minimum-length:** 8 characters :code:`
`\\ :code:`
` **Max-length:** 123 + characters :code:`
`\\ :code:`
` **Complexity requirements:** 3 out of 4 conditions below + need to be fulfilled :code:`
` Has lower characters :code:`
`Has upper characters + :code:`
` Has a digit :code:`
` Has a special character (Regex match [\\W_]) + :code:`
`\\ :code:`
` **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!". :paramtype admin_password: str :keyword license_type: The license type to use for Windows VMs. See `Azure Hybrid User Benefits `_ for more details. Known values are: "None" and "Windows_Server". - :paramtype license_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.LicenseType - :keyword enable_csi_proxy: For more details on CSI proxy, see the `CSI proxy GitHub repo - `_. + :paramtype license_type: str or ~azure.mgmt.containerservice.models.LicenseType + :keyword enable_csi_proxy: Whether to enable CSI proxy. For more details on CSI proxy, see the + `CSI proxy GitHub repo `_. :paramtype enable_csi_proxy: bool :keyword gmsa_profile: The Windows gMSA Profile in the Managed Cluster. - :paramtype gmsa_profile: ~azure.mgmt.containerservice.v2024_07_01.models.WindowsGmsaProfile + :paramtype gmsa_profile: ~azure.mgmt.containerservice.models.WindowsGmsaProfile """ super().__init__(**kwargs) self.admin_username = admin_username @@ -6329,12 +7352,11 @@ class ManagedClusterWorkloadAutoScalerProfile(_serialization.Model): :ivar keda: KEDA (Kubernetes Event-driven Autoscaling) settings for the workload auto-scaler profile. - :vartype keda: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfileKeda + :vartype keda: ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfileKeda :ivar vertical_pod_autoscaler: VPA (Vertical Pod Autoscaler) settings for the workload auto-scaler profile. 
:vartype vertical_pod_autoscaler: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler + ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler """ _attribute_map = { @@ -6358,11 +7380,11 @@ def __init__( :keyword keda: KEDA (Kubernetes Event-driven Autoscaling) settings for the workload auto-scaler profile. :paramtype keda: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfileKeda + ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfileKeda :keyword vertical_pod_autoscaler: VPA (Vertical Pod Autoscaler) settings for the workload auto-scaler profile. :paramtype vertical_pod_autoscaler: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler + ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler """ super().__init__(**kwargs) self.keda = keda @@ -6423,6 +7445,108 @@ def __init__(self, *, enabled: bool = False, **kwargs: Any) -> None: self.enabled = enabled +class ManagedNamespace(SubResource): + """Namespace managed by ARM. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Resource ID. + :vartype id: str + :ivar name: The name of the resource that is unique within a resource group. This name can be + used to access the resource. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData + :ivar tags: The tags to be persisted on the managed cluster namespace. + :vartype tags: dict[str, str] + :ivar e_tag: Unique read-only string used to implement optimistic concurrency. The eTag value + will change when the resource is updated. 
Specify an if-match or if-none-match header with the + eTag value for a subsequent request to enable optimistic concurrency per the normal eTag + convention. + :vartype e_tag: str + :ivar location: The location of the namespace. + :vartype location: str + :ivar properties: Properties of a namespace. + :vartype properties: ~azure.mgmt.containerservice.models.NamespaceProperties + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "system_data": {"readonly": True}, + "e_tag": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "system_data": {"key": "systemData", "type": "SystemData"}, + "tags": {"key": "tags", "type": "{str}"}, + "e_tag": {"key": "eTag", "type": "str"}, + "location": {"key": "location", "type": "str"}, + "properties": {"key": "properties", "type": "NamespaceProperties"}, + } + + def __init__( + self, + *, + tags: Optional[dict[str, str]] = None, + location: Optional[str] = None, + properties: Optional["_models.NamespaceProperties"] = None, + **kwargs: Any + ) -> None: + """ + :keyword tags: The tags to be persisted on the managed cluster namespace. + :paramtype tags: dict[str, str] + :keyword location: The location of the namespace. + :paramtype location: str + :keyword properties: Properties of a namespace. + :paramtype properties: ~azure.mgmt.containerservice.models.NamespaceProperties + """ + super().__init__(**kwargs) + self.system_data: Optional["_models.SystemData"] = None + self.tags = tags + self.e_tag: Optional[str] = None + self.location = location + self.properties = properties + + +class ManagedNamespaceListResult(_serialization.Model): + """The result of a request to list managed namespaces in a managed cluster. + + :ivar value: The list of managed namespaces. 
+ :vartype value: list[~azure.mgmt.containerservice.models.ManagedNamespace] + :ivar next_link: The URI to fetch the next page of results, if any. + :vartype next_link: str + """ + + _attribute_map = { + "value": {"key": "value", "type": "[ManagedNamespace]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__( + self, + *, + value: Optional[list["_models.ManagedNamespace"]] = None, + next_link: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword value: The list of managed namespaces. + :paramtype value: list[~azure.mgmt.containerservice.models.ManagedNamespace] + :keyword next_link: The URI to fetch the next page of results, if any. + :paramtype next_link: str + """ + super().__init__(**kwargs) + self.value = value + self.next_link = next_link + + class ManagedServiceIdentityUserAssignedIdentitiesValue(_serialization.Model): # pylint: disable=name-too-long """ManagedServiceIdentityUserAssignedIdentitiesValue. @@ -6447,8 +7571,36 @@ class ManagedServiceIdentityUserAssignedIdentitiesValue(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.principal_id = None - self.client_id = None + self.principal_id: Optional[str] = None + self.client_id: Optional[str] = None + + +class ManualScaleProfile(_serialization.Model): + """Specifications on number of machines. + + :ivar size: VM size that AKS will use when creating and scaling e.g. 'Standard_E4s_v3', + 'Standard_E16s_v3' or 'Standard_D16s_v5'. + :vartype size: str + :ivar count: Number of nodes. + :vartype count: int + """ + + _attribute_map = { + "size": {"key": "size", "type": "str"}, + "count": {"key": "count", "type": "int"}, + } + + def __init__(self, *, size: Optional[str] = None, count: Optional[int] = None, **kwargs: Any) -> None: + """ + :keyword size: VM size that AKS will use when creating and scaling e.g. 'Standard_E4s_v3', + 'Standard_E16s_v3' or 'Standard_D16s_v5'. 
+ :paramtype size: str + :keyword count: Number of nodes. + :paramtype count: int + """ + super().__init__(**kwargs) + self.size = size + self.count = count class MeshRevision(_serialization.Model): @@ -6460,8 +7612,7 @@ class MeshRevision(_serialization.Model): :vartype upgrades: list[str] :ivar compatible_with: List of items this revision of service mesh is compatible with, and their associated versions. - :vartype compatible_with: - list[~azure.mgmt.containerservice.v2024_07_01.models.CompatibleVersions] + :vartype compatible_with: list[~azure.mgmt.containerservice.models.CompatibleVersions] """ _attribute_map = { @@ -6474,8 +7625,8 @@ def __init__( self, *, revision: Optional[str] = None, - upgrades: Optional[List[str]] = None, - compatible_with: Optional[List["_models.CompatibleVersions"]] = None, + upgrades: Optional[list[str]] = None, + compatible_with: Optional[list["_models.CompatibleVersions"]] = None, **kwargs: Any ) -> None: """ @@ -6485,8 +7636,7 @@ def __init__( :paramtype upgrades: list[str] :keyword compatible_with: List of items this revision of service mesh is compatible with, and their associated versions. - :paramtype compatible_with: - list[~azure.mgmt.containerservice.v2024_07_01.models.CompatibleVersions] + :paramtype compatible_with: list[~azure.mgmt.containerservice.models.CompatibleVersions] """ super().__init__(**kwargs) self.revision = revision @@ -6501,7 +7651,7 @@ class ProxyResource(Resource): Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. E.g. - "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". :vartype id: str :ivar name: The name of the resource. 
:vartype name: str @@ -6510,7 +7660,7 @@ class ProxyResource(Resource): :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData """ @@ -6520,7 +7670,7 @@ class MeshRevisionProfile(ProxyResource): Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. E.g. - "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". :vartype id: str :ivar name: The name of the resource. :vartype name: str @@ -6529,10 +7679,9 @@ class MeshRevisionProfile(ProxyResource): :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData :ivar properties: Mesh revision profile properties for a mesh. - :vartype properties: - ~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfileProperties + :vartype properties: ~azure.mgmt.containerservice.models.MeshRevisionProfileProperties """ _validation = { @@ -6553,8 +7702,7 @@ class MeshRevisionProfile(ProxyResource): def __init__(self, *, properties: Optional["_models.MeshRevisionProfileProperties"] = None, **kwargs: Any) -> None: """ :keyword properties: Mesh revision profile properties for a mesh. 
- :paramtype properties: - ~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfileProperties + :paramtype properties: ~azure.mgmt.containerservice.models.MeshRevisionProfileProperties """ super().__init__(**kwargs) self.properties = properties @@ -6566,7 +7714,7 @@ class MeshRevisionProfileList(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar value: Array of service mesh add-on revision profiles for all supported mesh modes. - :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile] + :vartype value: list[~azure.mgmt.containerservice.models.MeshRevisionProfile] :ivar next_link: The URL to get the next set of mesh revision profile. :vartype next_link: str """ @@ -6580,31 +7728,31 @@ class MeshRevisionProfileList(_serialization.Model): "next_link": {"key": "nextLink", "type": "str"}, } - def __init__(self, *, value: Optional[List["_models.MeshRevisionProfile"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.MeshRevisionProfile"]] = None, **kwargs: Any) -> None: """ :keyword value: Array of service mesh add-on revision profiles for all supported mesh modes. - :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile] + :paramtype value: list[~azure.mgmt.containerservice.models.MeshRevisionProfile] """ super().__init__(**kwargs) self.value = value - self.next_link = None + self.next_link: Optional[str] = None class MeshRevisionProfileProperties(_serialization.Model): """Mesh revision profile properties for a mesh. 
:ivar mesh_revisions: - :vartype mesh_revisions: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevision] + :vartype mesh_revisions: list[~azure.mgmt.containerservice.models.MeshRevision] """ _attribute_map = { "mesh_revisions": {"key": "meshRevisions", "type": "[MeshRevision]"}, } - def __init__(self, *, mesh_revisions: Optional[List["_models.MeshRevision"]] = None, **kwargs: Any) -> None: + def __init__(self, *, mesh_revisions: Optional[list["_models.MeshRevision"]] = None, **kwargs: Any) -> None: """ :keyword mesh_revisions: - :paramtype mesh_revisions: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevision] + :paramtype mesh_revisions: list[~azure.mgmt.containerservice.models.MeshRevision] """ super().__init__(**kwargs) self.mesh_revisions = mesh_revisions @@ -6616,7 +7764,7 @@ class MeshUpgradeProfile(ProxyResource): Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. E.g. - "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". :vartype id: str :ivar name: The name of the resource. :vartype name: str @@ -6625,10 +7773,9 @@ class MeshUpgradeProfile(ProxyResource): :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData :ivar properties: Mesh upgrade profile properties for a major.minor release. 
- :vartype properties: - ~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfileProperties + :vartype properties: ~azure.mgmt.containerservice.models.MeshUpgradeProfileProperties """ _validation = { @@ -6649,8 +7796,7 @@ class MeshUpgradeProfile(ProxyResource): def __init__(self, *, properties: Optional["_models.MeshUpgradeProfileProperties"] = None, **kwargs: Any) -> None: """ :keyword properties: Mesh upgrade profile properties for a major.minor release. - :paramtype properties: - ~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfileProperties + :paramtype properties: ~azure.mgmt.containerservice.models.MeshUpgradeProfileProperties """ super().__init__(**kwargs) self.properties = properties @@ -6662,7 +7808,7 @@ class MeshUpgradeProfileList(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar value: Array of supported service mesh add-on upgrade profiles. - :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile] + :vartype value: list[~azure.mgmt.containerservice.models.MeshUpgradeProfile] :ivar next_link: The URL to get the next set of mesh upgrade profile. :vartype next_link: str """ @@ -6676,14 +7822,14 @@ class MeshUpgradeProfileList(_serialization.Model): "next_link": {"key": "nextLink", "type": "str"}, } - def __init__(self, *, value: Optional[List["_models.MeshUpgradeProfile"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.MeshUpgradeProfile"]] = None, **kwargs: Any) -> None: """ :keyword value: Array of supported service mesh add-on upgrade profiles. 
- :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile] + :paramtype value: list[~azure.mgmt.containerservice.models.MeshUpgradeProfile] """ super().__init__(**kwargs) self.value = value - self.next_link = None + self.next_link: Optional[str] = None class MeshUpgradeProfileProperties(MeshRevision): @@ -6695,18 +7841,149 @@ class MeshUpgradeProfileProperties(MeshRevision): :vartype upgrades: list[str] :ivar compatible_with: List of items this revision of service mesh is compatible with, and their associated versions. - :vartype compatible_with: - list[~azure.mgmt.containerservice.v2024_07_01.models.CompatibleVersions] + :vartype compatible_with: list[~azure.mgmt.containerservice.models.CompatibleVersions] """ +class NamespaceProperties(_serialization.Model): + """Properties of a namespace managed by ARM. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar provisioning_state: The current provisioning state of the namespace. Known values are: + "Updating", "Deleting", "Creating", "Succeeded", "Failed", and "Canceled". + :vartype provisioning_state: str or + ~azure.mgmt.containerservice.models.NamespaceProvisioningState + :ivar labels: The labels of managed namespace. + :vartype labels: dict[str, str] + :ivar annotations: The annotations of managed namespace. + :vartype annotations: dict[str, str] + :ivar portal_fqdn: The special FQDN used by the Azure Portal to access the Managed Cluster. + This FQDN is for use only by the Azure Portal and should not be used by other clients. The + Azure Portal requires certain Cross-Origin Resource Sharing (CORS) headers to be sent in some + responses, which Kubernetes APIServer doesn't handle by default. This special FQDN supports + CORS, allowing the Azure Portal to function properly. + :vartype portal_fqdn: str + :ivar default_resource_quota: The default resource quota enforced upon the namespace. 
Customers + can have other Kubernetes resource quota objects under the namespace. Resource quotas are + additive; if multiple resource quotas are applied to a given namespace, then the effective + limit will be one such that all quotas on the namespace can be satisfied. + :vartype default_resource_quota: ~azure.mgmt.containerservice.models.ResourceQuota + :ivar default_network_policy: The default network policy enforced upon the namespace. Customers + can have other Kubernetes network policy objects under the namespace. Network policies are + additive; if a policy or policies apply to a given pod for a given direction, the connections + allowed in that direction for the pod is the union of what all applicable policies allow. + :vartype default_network_policy: ~azure.mgmt.containerservice.models.NetworkPolicies + :ivar adoption_policy: Action if Kubernetes namespace with same name already exists. Known + values are: "Never", "IfIdentical", and "Always". + :vartype adoption_policy: str or ~azure.mgmt.containerservice.models.AdoptionPolicy + :ivar delete_policy: Delete options of a namespace. Known values are: "Keep" and "Delete". 
+ :vartype delete_policy: str or ~azure.mgmt.containerservice.models.DeletePolicy + """ + + _validation = { + "provisioning_state": {"readonly": True}, + "portal_fqdn": {"readonly": True}, + } + + _attribute_map = { + "provisioning_state": {"key": "provisioningState", "type": "str"}, + "labels": {"key": "labels", "type": "{str}"}, + "annotations": {"key": "annotations", "type": "{str}"}, + "portal_fqdn": {"key": "portalFqdn", "type": "str"}, + "default_resource_quota": {"key": "defaultResourceQuota", "type": "ResourceQuota"}, + "default_network_policy": {"key": "defaultNetworkPolicy", "type": "NetworkPolicies"}, + "adoption_policy": {"key": "adoptionPolicy", "type": "str"}, + "delete_policy": {"key": "deletePolicy", "type": "str"}, + } + + def __init__( + self, + *, + labels: Optional[dict[str, str]] = None, + annotations: Optional[dict[str, str]] = None, + default_resource_quota: Optional["_models.ResourceQuota"] = None, + default_network_policy: Optional["_models.NetworkPolicies"] = None, + adoption_policy: Optional[Union[str, "_models.AdoptionPolicy"]] = None, + delete_policy: Optional[Union[str, "_models.DeletePolicy"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword labels: The labels of managed namespace. + :paramtype labels: dict[str, str] + :keyword annotations: The annotations of managed namespace. + :paramtype annotations: dict[str, str] + :keyword default_resource_quota: The default resource quota enforced upon the namespace. + Customers can have other Kubernetes resource quota objects under the namespace. Resource quotas + are additive; if multiple resource quotas are applied to a given namespace, then the effective + limit will be one such that all quotas on the namespace can be satisfied. + :paramtype default_resource_quota: ~azure.mgmt.containerservice.models.ResourceQuota + :keyword default_network_policy: The default network policy enforced upon the namespace. + Customers can have other Kubernetes network policy objects under the namespace. 
Network + policies are additive; if a policy or policies apply to a given pod for a given direction, the + connections allowed in that direction for the pod is the union of what all applicable policies + allow. + :paramtype default_network_policy: ~azure.mgmt.containerservice.models.NetworkPolicies + :keyword adoption_policy: Action if Kubernetes namespace with same name already exists. Known + values are: "Never", "IfIdentical", and "Always". + :paramtype adoption_policy: str or ~azure.mgmt.containerservice.models.AdoptionPolicy + :keyword delete_policy: Delete options of a namespace. Known values are: "Keep" and "Delete". + :paramtype delete_policy: str or ~azure.mgmt.containerservice.models.DeletePolicy + """ + super().__init__(**kwargs) + self.provisioning_state: Optional[Union[str, "_models.NamespaceProvisioningState"]] = None + self.labels = labels + self.annotations = annotations + self.portal_fqdn: Optional[str] = None + self.default_resource_quota = default_resource_quota + self.default_network_policy = default_network_policy + self.adoption_policy = adoption_policy + self.delete_policy = delete_policy + + +class NetworkPolicies(_serialization.Model): + """Default network policy of the namespace, specifying ingress and egress rules. + + :ivar ingress: Ingress policy for the network. Known values are: "DenyAll", "AllowAll", and + "AllowSameNamespace". + :vartype ingress: str or ~azure.mgmt.containerservice.models.PolicyRule + :ivar egress: Egress policy for the network. Known values are: "DenyAll", "AllowAll", and + "AllowSameNamespace". 
+ :vartype egress: str or ~azure.mgmt.containerservice.models.PolicyRule + """ + + _attribute_map = { + "ingress": {"key": "ingress", "type": "str"}, + "egress": {"key": "egress", "type": "str"}, + } + + def __init__( + self, + *, + ingress: Optional[Union[str, "_models.PolicyRule"]] = None, + egress: Optional[Union[str, "_models.PolicyRule"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword ingress: Ingress policy for the network. Known values are: "DenyAll", "AllowAll", and + "AllowSameNamespace". + :paramtype ingress: str or ~azure.mgmt.containerservice.models.PolicyRule + :keyword egress: Egress policy for the network. Known values are: "DenyAll", "AllowAll", and + "AllowSameNamespace". + :paramtype egress: str or ~azure.mgmt.containerservice.models.PolicyRule + """ + super().__init__(**kwargs) + self.ingress = ingress + self.egress = egress + + class OperationListResult(_serialization.Model): """The List Operation response. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of operations. 
- :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.OperationValue] + :vartype value: list[~azure.mgmt.containerservice.models.OperationValue] """ _validation = { @@ -6720,7 +7997,7 @@ class OperationListResult(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.value = None + self.value: Optional[list["_models.OperationValue"]] = None class OperationValue(_serialization.Model): @@ -6763,12 +8040,12 @@ class OperationValue(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.origin = None - self.name = None - self.operation = None - self.resource = None - self.description = None - self.provider = None + self.origin: Optional[str] = None + self.name: Optional[str] = None + self.operation: Optional[str] = None + self.resource: Optional[str] = None + self.description: Optional[str] = None + self.provider: Optional[str] = None class OutboundEnvironmentEndpoint(_serialization.Model): @@ -6778,7 +8055,7 @@ class OutboundEnvironmentEndpoint(_serialization.Model): azure-resource-management, apiserver, etc. :vartype category: str :ivar endpoints: The endpoints that AKS agent nodes connect to. - :vartype endpoints: list[~azure.mgmt.containerservice.v2024_07_01.models.EndpointDependency] + :vartype endpoints: list[~azure.mgmt.containerservice.models.EndpointDependency] """ _attribute_map = { @@ -6790,7 +8067,7 @@ def __init__( self, *, category: Optional[str] = None, - endpoints: Optional[List["_models.EndpointDependency"]] = None, + endpoints: Optional[list["_models.EndpointDependency"]] = None, **kwargs: Any ) -> None: """ @@ -6798,7 +8075,7 @@ def __init__( azure-resource-management, apiserver, etc. :paramtype category: str :keyword endpoints: The endpoints that AKS agent nodes connect to. 
- :paramtype endpoints: list[~azure.mgmt.containerservice.v2024_07_01.models.EndpointDependency] + :paramtype endpoints: list[~azure.mgmt.containerservice.models.EndpointDependency] """ super().__init__(**kwargs) self.category = category @@ -6813,8 +8090,7 @@ class OutboundEnvironmentEndpointCollection(_serialization.Model): All required parameters must be populated in order to send to server. :ivar value: Collection of resources. Required. - :vartype value: - list[~azure.mgmt.containerservice.v2024_07_01.models.OutboundEnvironmentEndpoint] + :vartype value: list[~azure.mgmt.containerservice.models.OutboundEnvironmentEndpoint] :ivar next_link: Link to next page of resources. :vartype next_link: str """ @@ -6829,15 +8105,14 @@ class OutboundEnvironmentEndpointCollection(_serialization.Model): "next_link": {"key": "nextLink", "type": "str"}, } - def __init__(self, *, value: List["_models.OutboundEnvironmentEndpoint"], **kwargs: Any) -> None: + def __init__(self, *, value: list["_models.OutboundEnvironmentEndpoint"], **kwargs: Any) -> None: """ :keyword value: Collection of resources. Required. - :paramtype value: - list[~azure.mgmt.containerservice.v2024_07_01.models.OutboundEnvironmentEndpoint] + :paramtype value: list[~azure.mgmt.containerservice.models.OutboundEnvironmentEndpoint] """ super().__init__(**kwargs) self.value = value - self.next_link = None + self.next_link: Optional[str] = None class PortRange(_serialization.Model): @@ -6850,7 +8125,7 @@ class PortRange(_serialization.Model): 65535, and be greater than or equal to portStart. :vartype port_end: int :ivar protocol: The network protocol of the port. Known values are: "TCP" and "UDP". - :vartype protocol: str or ~azure.mgmt.containerservice.v2024_07_01.models.Protocol + :vartype protocol: str or ~azure.mgmt.containerservice.models.Protocol """ _validation = { @@ -6880,7 +8155,7 @@ def __init__( to 65535, and be greater than or equal to portStart. 
:paramtype port_end: int :keyword protocol: The network protocol of the port. Known values are: "TCP" and "UDP". - :paramtype protocol: str or ~azure.mgmt.containerservice.v2024_07_01.models.Protocol + :paramtype protocol: str or ~azure.mgmt.containerservice.models.Protocol """ super().__init__(**kwargs) self.port_start = port_start @@ -6893,7 +8168,7 @@ class PowerState(_serialization.Model): :ivar code: Tells whether the cluster is Running or Stopped. Known values are: "Running" and "Stopped". - :vartype code: str or ~azure.mgmt.containerservice.v2024_07_01.models.Code + :vartype code: str or ~azure.mgmt.containerservice.models.Code """ _attribute_map = { @@ -6904,7 +8179,7 @@ def __init__(self, *, code: Optional[Union[str, "_models.Code"]] = None, **kwarg """ :keyword code: Tells whether the cluster is Running or Stopped. Known values are: "Running" and "Stopped". - :paramtype code: str or ~azure.mgmt.containerservice.v2024_07_01.models.Code + :paramtype code: str or ~azure.mgmt.containerservice.models.Code """ super().__init__(**kwargs) self.code = code @@ -6944,13 +8219,13 @@ class PrivateEndpointConnection(_serialization.Model): :ivar provisioning_state: The current provisioning state. Known values are: "Canceled", "Creating", "Deleting", "Failed", and "Succeeded". :vartype provisioning_state: str or - ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnectionProvisioningState + ~azure.mgmt.containerservice.models.PrivateEndpointConnectionProvisioningState :ivar private_endpoint: The resource of private endpoint. - :vartype private_endpoint: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpoint + :vartype private_endpoint: ~azure.mgmt.containerservice.models.PrivateEndpoint :ivar private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider. 
:vartype private_link_service_connection_state: - ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkServiceConnectionState + ~azure.mgmt.containerservice.models.PrivateLinkServiceConnectionState """ _validation = { @@ -6981,17 +8256,17 @@ def __init__( ) -> None: """ :keyword private_endpoint: The resource of private endpoint. - :paramtype private_endpoint: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpoint + :paramtype private_endpoint: ~azure.mgmt.containerservice.models.PrivateEndpoint :keyword private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider. :paramtype private_link_service_connection_state: - ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkServiceConnectionState + ~azure.mgmt.containerservice.models.PrivateLinkServiceConnectionState """ super().__init__(**kwargs) - self.id = None - self.name = None - self.type = None - self.provisioning_state = None + self.id: Optional[str] = None + self.name: Optional[str] = None + self.type: Optional[str] = None + self.provisioning_state: Optional[Union[str, "_models.PrivateEndpointConnectionProvisioningState"]] = None self.private_endpoint = private_endpoint self.private_link_service_connection_state = private_link_service_connection_state @@ -7000,18 +8275,17 @@ class PrivateEndpointConnectionListResult(_serialization.Model): """A list of private endpoint connections. :ivar value: The collection value. 
- :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection] + :vartype value: list[~azure.mgmt.containerservice.models.PrivateEndpointConnection] """ _attribute_map = { "value": {"key": "value", "type": "[PrivateEndpointConnection]"}, } - def __init__(self, *, value: Optional[List["_models.PrivateEndpointConnection"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.PrivateEndpointConnection"]] = None, **kwargs: Any) -> None: """ :keyword value: The collection value. - :paramtype value: - list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection] + :paramtype value: list[~azure.mgmt.containerservice.models.PrivateEndpointConnection] """ super().__init__(**kwargs) self.value = value @@ -7057,7 +8331,7 @@ def __init__( name: Optional[str] = None, type: Optional[str] = None, group_id: Optional[str] = None, - required_members: Optional[List[str]] = None, + required_members: Optional[list[str]] = None, **kwargs: Any ) -> None: """ @@ -7078,24 +8352,24 @@ def __init__( self.type = type self.group_id = group_id self.required_members = required_members - self.private_link_service_id = None + self.private_link_service_id: Optional[str] = None class PrivateLinkResourcesListResult(_serialization.Model): """A list of private link resources. :ivar value: The collection value. - :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource] + :vartype value: list[~azure.mgmt.containerservice.models.PrivateLinkResource] """ _attribute_map = { "value": {"key": "value", "type": "[PrivateLinkResource]"}, } - def __init__(self, *, value: Optional[List["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None: """ :keyword value: The collection value. 
- :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource] + :paramtype value: list[~azure.mgmt.containerservice.models.PrivateLinkResource] """ super().__init__(**kwargs) self.value = value @@ -7106,7 +8380,7 @@ class PrivateLinkServiceConnectionState(_serialization.Model): :ivar status: The private link service connection status. Known values are: "Pending", "Approved", "Rejected", and "Disconnected". - :vartype status: str or ~azure.mgmt.containerservice.v2024_07_01.models.ConnectionStatus + :vartype status: str or ~azure.mgmt.containerservice.models.ConnectionStatus :ivar description: The private link service connection description. :vartype description: str """ @@ -7126,7 +8400,7 @@ def __init__( """ :keyword status: The private link service connection status. Known values are: "Pending", "Approved", "Rejected", and "Disconnected". - :paramtype status: str or ~azure.mgmt.containerservice.v2024_07_01.models.ConnectionStatus + :paramtype status: str or ~azure.mgmt.containerservice.models.ConnectionStatus :keyword description: The private link service connection description. :paramtype description: str """ @@ -7144,12 +8418,12 @@ class RelativeMonthlySchedule(_serialization.Model): :ivar interval_months: Specifies the number of months between each set of occurrences. Required. :vartype interval_months: int - :ivar week_index: Specifies on which week of the month the dayOfWeek applies. Required. Known - values are: "First", "Second", "Third", "Fourth", and "Last". - :vartype week_index: str or ~azure.mgmt.containerservice.v2024_07_01.models.Type + :ivar week_index: The week index. Specifies on which week of the month the dayOfWeek applies. + Required. Known values are: "First", "Second", "Third", "Fourth", and "Last". + :vartype week_index: str or ~azure.mgmt.containerservice.models.Type :ivar day_of_week: Specifies on which day of the week the maintenance occurs. Required. 
Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". - :vartype day_of_week: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + :vartype day_of_week: str or ~azure.mgmt.containerservice.models.WeekDay """ _validation = { @@ -7176,13 +8450,13 @@ def __init__( :keyword interval_months: Specifies the number of months between each set of occurrences. Required. :paramtype interval_months: int - :keyword week_index: Specifies on which week of the month the dayOfWeek applies. Required. - Known values are: "First", "Second", "Third", "Fourth", and "Last". - :paramtype week_index: str or ~azure.mgmt.containerservice.v2024_07_01.models.Type + :keyword week_index: The week index. Specifies on which week of the month the dayOfWeek + applies. Required. Known values are: "First", "Second", "Third", "Fourth", and "Last". + :paramtype week_index: str or ~azure.mgmt.containerservice.models.Type :keyword day_of_week: Specifies on which day of the week the maintenance occurs. Required. Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". - :paramtype day_of_week: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + :paramtype day_of_week: str or ~azure.mgmt.containerservice.models.WeekDay """ super().__init__(**kwargs) self.interval_months = interval_months @@ -7190,6 +8464,75 @@ def __init__( self.day_of_week = day_of_week +class ResourceQuota(_serialization.Model): + """Resource quota for the namespace. + + :ivar cpu_request: CPU request of the namespace in one-thousandth CPU form. See `CPU resource + units + `_ + for more details. + :vartype cpu_request: str + :ivar cpu_limit: CPU limit of the namespace in one-thousandth CPU form. See `CPU resource units + `_ + for more details. + :vartype cpu_limit: str + :ivar memory_request: Memory request of the namespace in the power-of-two equivalents form: Ei, + Pi, Ti, Gi, Mi, Ki. 
See `Memory resource units + `_ + for more details. + :vartype memory_request: str + :ivar memory_limit: Memory limit of the namespace in the power-of-two equivalents form: Ei, Pi, + Ti, Gi, Mi, Ki. See `Memory resource units + `_ + for more details. + :vartype memory_limit: str + """ + + _attribute_map = { + "cpu_request": {"key": "cpuRequest", "type": "str"}, + "cpu_limit": {"key": "cpuLimit", "type": "str"}, + "memory_request": {"key": "memoryRequest", "type": "str"}, + "memory_limit": {"key": "memoryLimit", "type": "str"}, + } + + def __init__( + self, + *, + cpu_request: Optional[str] = None, + cpu_limit: Optional[str] = None, + memory_request: Optional[str] = None, + memory_limit: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword cpu_request: CPU request of the namespace in one-thousandth CPU form. See `CPU + resource units + `_ + for more details. + :paramtype cpu_request: str + :keyword cpu_limit: CPU limit of the namespace in one-thousandth CPU form. See `CPU resource + units + `_ + for more details. + :paramtype cpu_limit: str + :keyword memory_request: Memory request of the namespace in the power-of-two equivalents form: + Ei, Pi, Ti, Gi, Mi, Ki. See `Memory resource units + `_ + for more details. + :paramtype memory_request: str + :keyword memory_limit: Memory limit of the namespace in the power-of-two equivalents form: Ei, + Pi, Ti, Gi, Mi, Ki. See `Memory resource units + `_ + for more details. + :paramtype memory_limit: str + """ + super().__init__(**kwargs) + self.cpu_request = cpu_request + self.cpu_limit = cpu_limit + self.memory_request = memory_request + self.memory_limit = memory_limit + + class ResourceReference(_serialization.Model): """A reference to an Azure resource. 
@@ -7294,13 +8637,33 @@ class RunCommandResult(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.id = None - self.provisioning_state = None - self.exit_code = None - self.started_at = None - self.finished_at = None - self.logs = None - self.reason = None + self.id: Optional[str] = None + self.provisioning_state: Optional[str] = None + self.exit_code: Optional[int] = None + self.started_at: Optional[datetime.datetime] = None + self.finished_at: Optional[datetime.datetime] = None + self.logs: Optional[str] = None + self.reason: Optional[str] = None + + +class ScaleProfile(_serialization.Model): + """Specifications on how to scale a VirtualMachines agent pool. + + :ivar manual: Specifications on how to scale the VirtualMachines agent pool to a fixed size. + :vartype manual: list[~azure.mgmt.containerservice.models.ManualScaleProfile] + """ + + _attribute_map = { + "manual": {"key": "manual", "type": "[ManualScaleProfile]"}, + } + + def __init__(self, *, manual: Optional[list["_models.ManualScaleProfile"]] = None, **kwargs: Any) -> None: + """ + :keyword manual: Specifications on how to scale the VirtualMachines agent pool to a fixed size. + :paramtype manual: list[~azure.mgmt.containerservice.models.ManualScaleProfile] + """ + super().__init__(**kwargs) + self.manual = manual class Schedule(_serialization.Model): @@ -7308,17 +8671,15 @@ class Schedule(_serialization.Model): 'absoluteMonthly' or 'relativeMonthly' for your maintenance schedule. :ivar daily: For schedules like: 'recur every day' or 'recur every 3 days'. - :vartype daily: ~azure.mgmt.containerservice.v2024_07_01.models.DailySchedule + :vartype daily: ~azure.mgmt.containerservice.models.DailySchedule :ivar weekly: For schedules like: 'recur every Monday' or 'recur every 3 weeks on Wednesday'. 
- :vartype weekly: ~azure.mgmt.containerservice.v2024_07_01.models.WeeklySchedule + :vartype weekly: ~azure.mgmt.containerservice.models.WeeklySchedule :ivar absolute_monthly: For schedules like: 'recur every month on the 15th' or 'recur every 3 months on the 20th'. - :vartype absolute_monthly: - ~azure.mgmt.containerservice.v2024_07_01.models.AbsoluteMonthlySchedule + :vartype absolute_monthly: ~azure.mgmt.containerservice.models.AbsoluteMonthlySchedule :ivar relative_monthly: For schedules like: 'recur every month on the first Monday' or 'recur every 3 months on last Friday'. - :vartype relative_monthly: - ~azure.mgmt.containerservice.v2024_07_01.models.RelativeMonthlySchedule + :vartype relative_monthly: ~azure.mgmt.containerservice.models.RelativeMonthlySchedule """ _attribute_map = { @@ -7339,18 +8700,16 @@ def __init__( ) -> None: """ :keyword daily: For schedules like: 'recur every day' or 'recur every 3 days'. - :paramtype daily: ~azure.mgmt.containerservice.v2024_07_01.models.DailySchedule + :paramtype daily: ~azure.mgmt.containerservice.models.DailySchedule :keyword weekly: For schedules like: 'recur every Monday' or 'recur every 3 weeks on Wednesday'. - :paramtype weekly: ~azure.mgmt.containerservice.v2024_07_01.models.WeeklySchedule + :paramtype weekly: ~azure.mgmt.containerservice.models.WeeklySchedule :keyword absolute_monthly: For schedules like: 'recur every month on the 15th' or 'recur every 3 months on the 20th'. - :paramtype absolute_monthly: - ~azure.mgmt.containerservice.v2024_07_01.models.AbsoluteMonthlySchedule + :paramtype absolute_monthly: ~azure.mgmt.containerservice.models.AbsoluteMonthlySchedule :keyword relative_monthly: For schedules like: 'recur every month on the first Monday' or 'recur every 3 months on last Friday'. 
- :paramtype relative_monthly: - ~azure.mgmt.containerservice.v2024_07_01.models.RelativeMonthlySchedule + :paramtype relative_monthly: ~azure.mgmt.containerservice.models.RelativeMonthlySchedule """ super().__init__(**kwargs) self.daily = daily @@ -7365,9 +8724,9 @@ class ServiceMeshProfile(_serialization.Model): All required parameters must be populated in order to send to server. :ivar mode: Mode of the service mesh. Required. Known values are: "Istio" and "Disabled". - :vartype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ServiceMeshMode + :vartype mode: str or ~azure.mgmt.containerservice.models.ServiceMeshMode :ivar istio: Istio service mesh configuration. - :vartype istio: ~azure.mgmt.containerservice.v2024_07_01.models.IstioServiceMesh + :vartype istio: ~azure.mgmt.containerservice.models.IstioServiceMesh """ _validation = { @@ -7388,16 +8747,16 @@ def __init__( ) -> None: """ :keyword mode: Mode of the service mesh. Required. Known values are: "Istio" and "Disabled". - :paramtype mode: str or ~azure.mgmt.containerservice.v2024_07_01.models.ServiceMeshMode + :paramtype mode: str or ~azure.mgmt.containerservice.models.ServiceMeshMode :keyword istio: Istio service mesh configuration. - :paramtype istio: ~azure.mgmt.containerservice.v2024_07_01.models.IstioServiceMesh + :paramtype istio: ~azure.mgmt.containerservice.models.IstioServiceMesh """ super().__init__(**kwargs) self.mode = mode self.istio = istio -class Snapshot(TrackedResource): # pylint: disable=too-many-instance-attributes +class Snapshot(TrackedResource): """A node pool snapshot resource. Variables are only populated by the server, and will be ignored when sending a request. @@ -7405,7 +8764,7 @@ class Snapshot(TrackedResource): # pylint: disable=too-many-instance-attributes All required parameters must be populated in order to send to server. :ivar id: Fully qualified resource ID for the resource. E.g. 
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". :vartype id: str :ivar name: The name of the resource. :vartype name: str @@ -7414,28 +8773,28 @@ class Snapshot(TrackedResource): # pylint: disable=too-many-instance-attributes :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData :ivar tags: Resource tags. :vartype tags: dict[str, str] :ivar location: The geo-location where the resource lives. Required. :vartype location: str :ivar creation_data: CreationData to be used to specify the source agent pool resource ID to create this snapshot. - :vartype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :vartype creation_data: ~azure.mgmt.containerservice.models.CreationData :ivar snapshot_type: The type of a snapshot. The default is NodePool. "NodePool" - :vartype snapshot_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.SnapshotType + :vartype snapshot_type: str or ~azure.mgmt.containerservice.models.SnapshotType :ivar kubernetes_version: The version of Kubernetes. :vartype kubernetes_version: str :ivar node_image_version: The version of node image. :vartype node_image_version: str :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and "Windows". - :vartype os_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSType + :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. 
The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= - 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "CBLMariner", - "Windows2019", and "Windows2022". - :vartype os_sku: str or ~azure.mgmt.containerservice.v2024_07_01.models.OSSKU + 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3", + "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404". + :vartype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU :ivar vm_size: The size of the VM. :vartype vm_size: str :ivar enable_fips: Whether to use a FIPS-enabled OS. @@ -7477,7 +8836,7 @@ def __init__( self, *, location: str, - tags: Optional[Dict[str, str]] = None, + tags: Optional[dict[str, str]] = None, creation_data: Optional["_models.CreationData"] = None, snapshot_type: Union[str, "_models.SnapshotType"] = "NodePool", **kwargs: Any @@ -7489,19 +8848,19 @@ def __init__( :paramtype location: str :keyword creation_data: CreationData to be used to specify the source agent pool resource ID to create this snapshot. - :paramtype creation_data: ~azure.mgmt.containerservice.v2024_07_01.models.CreationData + :paramtype creation_data: ~azure.mgmt.containerservice.models.CreationData :keyword snapshot_type: The type of a snapshot. The default is NodePool. 
"NodePool" - :paramtype snapshot_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.SnapshotType + :paramtype snapshot_type: str or ~azure.mgmt.containerservice.models.SnapshotType """ super().__init__(tags=tags, location=location, **kwargs) self.creation_data = creation_data self.snapshot_type = snapshot_type - self.kubernetes_version = None - self.node_image_version = None - self.os_type = None - self.os_sku = None - self.vm_size = None - self.enable_fips = None + self.kubernetes_version: Optional[str] = None + self.node_image_version: Optional[str] = None + self.os_type: Optional[Union[str, "_models.OSType"]] = None + self.os_sku: Optional[Union[str, "_models.OSSKU"]] = None + self.vm_size: Optional[str] = None + self.enable_fips: Optional[bool] = None class SnapshotListResult(_serialization.Model): @@ -7510,7 +8869,7 @@ class SnapshotListResult(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of snapshots. - :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :vartype value: list[~azure.mgmt.containerservice.models.Snapshot] :ivar next_link: The URL to get the next set of snapshot results. :vartype next_link: str """ @@ -7524,17 +8883,17 @@ class SnapshotListResult(_serialization.Model): "next_link": {"key": "nextLink", "type": "str"}, } - def __init__(self, *, value: Optional[List["_models.Snapshot"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.Snapshot"]] = None, **kwargs: Any) -> None: """ :keyword value: The list of snapshots. 
- :paramtype value: list[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :paramtype value: list[~azure.mgmt.containerservice.models.Snapshot] """ super().__init__(**kwargs) self.value = value - self.next_link = None + self.next_link: Optional[str] = None -class SysctlConfig(_serialization.Model): # pylint: disable=too-many-instance-attributes +class SysctlConfig(_serialization.Model): """Sysctl settings for Linux agent nodes. :ivar net_core_somaxconn: Sysctl setting net.core.somaxconn. @@ -7761,15 +9120,14 @@ class SystemData(_serialization.Model): :vartype created_by: str :ivar created_by_type: The type of identity that created the resource. Known values are: "User", "Application", "ManagedIdentity", and "Key". - :vartype created_by_type: str or ~azure.mgmt.containerservice.v2024_07_01.models.CreatedByType + :vartype created_by_type: str or ~azure.mgmt.containerservice.models.CreatedByType :ivar created_at: The timestamp of resource creation (UTC). :vartype created_at: ~datetime.datetime :ivar last_modified_by: The identity that last modified the resource. :vartype last_modified_by: str :ivar last_modified_by_type: The type of identity that last modified the resource. Known values are: "User", "Application", "ManagedIdentity", and "Key". - :vartype last_modified_by_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.CreatedByType + :vartype last_modified_by_type: str or ~azure.mgmt.containerservice.models.CreatedByType :ivar last_modified_at: The timestamp of resource last modification (UTC). :vartype last_modified_at: ~datetime.datetime """ @@ -7799,16 +9157,14 @@ def __init__( :paramtype created_by: str :keyword created_by_type: The type of identity that created the resource. Known values are: "User", "Application", "ManagedIdentity", and "Key". 
- :paramtype created_by_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.CreatedByType + :paramtype created_by_type: str or ~azure.mgmt.containerservice.models.CreatedByType :keyword created_at: The timestamp of resource creation (UTC). :paramtype created_at: ~datetime.datetime :keyword last_modified_by: The identity that last modified the resource. :paramtype last_modified_by: str :keyword last_modified_by_type: The type of identity that last modified the resource. Known values are: "User", "Application", "ManagedIdentity", and "Key". - :paramtype last_modified_by_type: str or - ~azure.mgmt.containerservice.v2024_07_01.models.CreatedByType + :paramtype last_modified_by_type: str or ~azure.mgmt.containerservice.models.CreatedByType :keyword last_modified_at: The timestamp of resource last modification (UTC). :paramtype last_modified_at: ~datetime.datetime """ @@ -7832,7 +9188,7 @@ class TagsObject(_serialization.Model): "tags": {"key": "tags", "type": "{str}"}, } - def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None: + def __init__(self, *, tags: Optional[dict[str, str]] = None, **kwargs: Any) -> None: """ :keyword tags: Resource tags. :paramtype tags: dict[str, str] @@ -7846,10 +9202,11 @@ class TimeInWeek(_serialization.Model): :ivar day: The day of the week. Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". - :vartype day: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay - :ivar hour_slots: Each integer hour represents a time range beginning at 0m after the hour - ending at the next hour (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 - UTC. Specifying [0, 1] means the 00:00 - 02:00 UTC time range. + :vartype day: str or ~azure.mgmt.containerservice.models.WeekDay + :ivar hour_slots: A list of hours in the day used to identify a time range. 
Each integer hour + represents a time range beginning at 0m after the hour ending at the next hour (non-inclusive). + 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 UTC. Specifying [0, 1] means the 00:00 - + 02:00 UTC time range. :vartype hour_slots: list[int] """ @@ -7862,16 +9219,17 @@ def __init__( self, *, day: Optional[Union[str, "_models.WeekDay"]] = None, - hour_slots: Optional[List[int]] = None, + hour_slots: Optional[list[int]] = None, **kwargs: Any ) -> None: """ :keyword day: The day of the week. Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". - :paramtype day: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay - :keyword hour_slots: Each integer hour represents a time range beginning at 0m after the hour - ending at the next hour (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 - UTC. Specifying [0, 1] means the 00:00 - 02:00 UTC time range. + :paramtype day: str or ~azure.mgmt.containerservice.models.WeekDay + :keyword hour_slots: A list of hours in the day used to identify a time range. Each integer + hour represents a time range beginning at 0m after the hour ending at the next hour + (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 UTC. Specifying [0, 1] + means the 00:00 - 02:00 UTC time range. :paramtype hour_slots: list[int] """ super().__init__(**kwargs) @@ -7880,7 +9238,7 @@ def __init__( class TimeSpan(_serialization.Model): - """For example, between 2021-05-25T13:00:00Z and 2021-05-25T14:00:00Z. + """A time range. For example, between 2021-05-25T13:00:00Z and 2021-05-25T14:00:00Z. :ivar start: The start of a time span. :vartype start: ~datetime.datetime @@ -7919,7 +9277,7 @@ class TrustedAccessRole(_serialization.Model): :ivar rules: List of rules for the role. This maps to 'rules' property of `Kubernetes Cluster Role `_. 
- :vartype rules: list[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleRule] + :vartype rules: list[~azure.mgmt.containerservice.models.TrustedAccessRoleRule] """ _validation = { @@ -7937,9 +9295,9 @@ class TrustedAccessRole(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.source_resource_type = None - self.name = None - self.rules = None + self.source_resource_type: Optional[str] = None + self.name: Optional[str] = None + self.rules: Optional[list["_models.TrustedAccessRoleRule"]] = None class TrustedAccessRoleBinding(Resource): @@ -7950,7 +9308,7 @@ class TrustedAccessRoleBinding(Resource): All required parameters must be populated in order to send to server. :ivar id: Fully qualified resource ID for the resource. E.g. - "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". :vartype id: str :ivar name: The name of the resource. :vartype name: str @@ -7959,11 +9317,11 @@ class TrustedAccessRoleBinding(Resource): :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.mgmt.containerservice.v2024_07_01.models.SystemData + :vartype system_data: ~azure.mgmt.containerservice.models.SystemData :ivar provisioning_state: The current provisioning state of trusted access role binding. Known values are: "Canceled", "Deleting", "Failed", "Succeeded", and "Updating". 
:vartype provisioning_state: str or - ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBindingProvisioningState + ~azure.mgmt.containerservice.models.TrustedAccessRoleBindingProvisioningState :ivar source_resource_id: The ARM resource ID of source resource that trusted access is configured for. Required. :vartype source_resource_id: str @@ -7992,7 +9350,7 @@ class TrustedAccessRoleBinding(Resource): "roles": {"key": "properties.roles", "type": "[str]"}, } - def __init__(self, *, source_resource_id: str, roles: List[str], **kwargs: Any) -> None: + def __init__(self, *, source_resource_id: str, roles: list[str], **kwargs: Any) -> None: """ :keyword source_resource_id: The ARM resource ID of source resource that trusted access is configured for. Required. @@ -8002,7 +9360,7 @@ def __init__(self, *, source_resource_id: str, roles: List[str], **kwargs: Any) :paramtype roles: list[str] """ super().__init__(**kwargs) - self.provisioning_state = None + self.provisioning_state: Optional[Union[str, "_models.TrustedAccessRoleBindingProvisioningState"]] = None self.source_resource_id = source_resource_id self.roles = roles @@ -8013,7 +9371,7 @@ class TrustedAccessRoleBindingListResult(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar value: Role binding list. - :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :vartype value: list[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] :ivar next_link: Link to next page of resources. 
:vartype next_link: str """ @@ -8027,15 +9385,14 @@ class TrustedAccessRoleBindingListResult(_serialization.Model): "next_link": {"key": "nextLink", "type": "str"}, } - def __init__(self, *, value: Optional[List["_models.TrustedAccessRoleBinding"]] = None, **kwargs: Any) -> None: + def __init__(self, *, value: Optional[list["_models.TrustedAccessRoleBinding"]] = None, **kwargs: Any) -> None: """ :keyword value: Role binding list. - :paramtype value: - list[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + :paramtype value: list[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] """ super().__init__(**kwargs) self.value = value - self.next_link = None + self.next_link: Optional[str] = None class TrustedAccessRoleListResult(_serialization.Model): @@ -8044,7 +9401,7 @@ class TrustedAccessRoleListResult(_serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. :ivar value: Role list. - :vartype value: list[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRole] + :vartype value: list[~azure.mgmt.containerservice.models.TrustedAccessRole] :ivar next_link: Link to next page of resources. 
:vartype next_link: str """ @@ -8062,8 +9419,8 @@ class TrustedAccessRoleListResult(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.value = None - self.next_link = None + self.value: Optional[list["_models.TrustedAccessRole"]] = None + self.next_link: Optional[str] = None class TrustedAccessRoleRule(_serialization.Model): @@ -8102,11 +9459,11 @@ class TrustedAccessRoleRule(_serialization.Model): def __init__(self, **kwargs: Any) -> None: """ """ super().__init__(**kwargs) - self.verbs = None - self.api_groups = None - self.resources = None - self.resource_names = None - self.non_resource_ur_ls = None + self.verbs: Optional[list[str]] = None + self.api_groups: Optional[list[str]] = None + self.resources: Optional[list[str]] = None + self.resource_names: Optional[list[str]] = None + self.non_resource_ur_ls: Optional[list[str]] = None class UpgradeOverrideSettings(_serialization.Model): @@ -8147,6 +9504,52 @@ def __init__( self.until = until +class VirtualMachineNodes(_serialization.Model): + """Current status on a group of nodes of the same vm size. + + :ivar size: The VM size of the agents used to host this group of nodes. + :vartype size: str + :ivar count: Number of nodes. + :vartype count: int + """ + + _attribute_map = { + "size": {"key": "size", "type": "str"}, + "count": {"key": "count", "type": "int"}, + } + + def __init__(self, *, size: Optional[str] = None, count: Optional[int] = None, **kwargs: Any) -> None: + """ + :keyword size: The VM size of the agents used to host this group of nodes. + :paramtype size: str + :keyword count: Number of nodes. + :paramtype count: int + """ + super().__init__(**kwargs) + self.size = size + self.count = count + + +class VirtualMachinesProfile(_serialization.Model): + """Specifications on VirtualMachines agent pool. + + :ivar scale: Specifications on how to scale a VirtualMachines agent pool. 
+ :vartype scale: ~azure.mgmt.containerservice.models.ScaleProfile + """ + + _attribute_map = { + "scale": {"key": "scale", "type": "ScaleProfile"}, + } + + def __init__(self, *, scale: Optional["_models.ScaleProfile"] = None, **kwargs: Any) -> None: + """ + :keyword scale: Specifications on how to scale a VirtualMachines agent pool. + :paramtype scale: ~azure.mgmt.containerservice.models.ScaleProfile + """ + super().__init__(**kwargs) + self.scale = scale + + class WeeklySchedule(_serialization.Model): """For schedules like: 'recur every Monday' or 'recur every 3 weeks on Wednesday'. @@ -8156,7 +9559,7 @@ class WeeklySchedule(_serialization.Model): :vartype interval_weeks: int :ivar day_of_week: Specifies on which day of the week the maintenance occurs. Required. Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". - :vartype day_of_week: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + :vartype day_of_week: str or ~azure.mgmt.containerservice.models.WeekDay """ _validation = { @@ -8177,7 +9580,7 @@ def __init__(self, *, interval_weeks: int, day_of_week: Union[str, "_models.Week :keyword day_of_week: Specifies on which day of the week the maintenance occurs. Required. Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". - :paramtype day_of_week: str or ~azure.mgmt.containerservice.v2024_07_01.models.WeekDay + :paramtype day_of_week: str or ~azure.mgmt.containerservice.models.WeekDay """ super().__init__(**kwargs) self.interval_weeks = interval_weeks @@ -8187,15 +9590,16 @@ def __init__(self, *, interval_weeks: int, day_of_week: Union[str, "_models.Week class WindowsGmsaProfile(_serialization.Model): """Windows gMSA Profile in the managed cluster. - :ivar enabled: Specifies whether to enable Windows gMSA in the managed cluster. + :ivar enabled: Whether to enable Windows gMSA. Specifies whether to enable Windows gMSA in the + managed cluster. 
:vartype enabled: bool - :ivar dns_server: Specifies the DNS server for Windows gMSA. :code:`
`:code:`
` Set it to - empty if you have configured the DNS server in the vnet which is used to create the managed + :ivar dns_server: Specifies the DNS server for Windows gMSA. :code:`
`\\ :code:`
` Set it + to empty if you have configured the DNS server in the vnet which is used to create the managed cluster. :vartype dns_server: str - :ivar root_domain_name: Specifies the root domain name for Windows gMSA. - :code:`
`:code:`
` Set it to empty if you have configured the DNS server in the vnet - which is used to create the managed cluster. + :ivar root_domain_name: Specifies the root domain name for Windows gMSA. :code:`
`\\ + :code:`
` Set it to empty if you have configured the DNS server in the vnet which is used to + create the managed cluster. :vartype root_domain_name: str """ @@ -8214,15 +9618,16 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword enabled: Specifies whether to enable Windows gMSA in the managed cluster. + :keyword enabled: Whether to enable Windows gMSA. Specifies whether to enable Windows gMSA in + the managed cluster. :paramtype enabled: bool - :keyword dns_server: Specifies the DNS server for Windows gMSA. :code:`
`:code:`
` Set it - to empty if you have configured the DNS server in the vnet which is used to create the managed - cluster. + :keyword dns_server: Specifies the DNS server for Windows gMSA. :code:`
`\\ :code:`
` Set + it to empty if you have configured the DNS server in the vnet which is used to create the + managed cluster. :paramtype dns_server: str - :keyword root_domain_name: Specifies the root domain name for Windows gMSA. - :code:`
`:code:`
` Set it to empty if you have configured the DNS server in the vnet - which is used to create the managed cluster. + :keyword root_domain_name: Specifies the root domain name for Windows gMSA. :code:`
`\\ + :code:`
` Set it to empty if you have configured the DNS server in the vnet which is used to + create the managed cluster. :paramtype root_domain_name: str """ super().__init__(**kwargs) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/_patch.py similarity index 61% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_patch.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/_patch.py index f7dd3251033..8bcb627aa47 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/operations/_patch.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/models/_patch.py @@ -1,7 +1,8 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/__init__.py similarity index 61% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/__init__.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/__init__.py index d59e9e8a9f1..70b2a677e7b 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/__init__.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/__init__.py @@ -5,27 +5,35 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position -from ._operations import Operations -from ._managed_clusters_operations import ManagedClustersOperations -from ._maintenance_configurations_operations import MaintenanceConfigurationsOperations -from ._agent_pools_operations import AgentPoolsOperations -from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations -from ._private_link_resources_operations import PrivateLinkResourcesOperations -from ._resolve_private_link_service_id_operations import ResolvePrivateLinkServiceIdOperations -from ._snapshots_operations import SnapshotsOperations -from ._trusted_access_role_bindings_operations import TrustedAccessRoleBindingsOperations -from ._trusted_access_roles_operations import TrustedAccessRolesOperations -from ._machines_operations import MachinesOperations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import Operations # type: ignore +from ._managed_clusters_operations import ManagedClustersOperations # type: ignore +from ._maintenance_configurations_operations import MaintenanceConfigurationsOperations # type: ignore +from ._managed_namespaces_operations import ManagedNamespacesOperations # type: ignore +from ._agent_pools_operations import AgentPoolsOperations # type: ignore +from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations # type: ignore +from ._private_link_resources_operations import PrivateLinkResourcesOperations # type: ignore +from ._resolve_private_link_service_id_operations import ResolvePrivateLinkServiceIdOperations # type: ignore +from ._snapshots_operations import SnapshotsOperations # type: ignore +from ._trusted_access_role_bindings_operations import TrustedAccessRoleBindingsOperations # type: ignore +from ._trusted_access_roles_operations import 
TrustedAccessRolesOperations # type: ignore +from ._machines_operations import MachinesOperations # type: ignore from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ "Operations", "ManagedClustersOperations", "MaintenanceConfigurationsOperations", + "ManagedNamespacesOperations", "AgentPoolsOperations", "PrivateEndpointConnectionsOperations", "PrivateLinkResourcesOperations", @@ -35,5 +43,5 @@ "TrustedAccessRolesOperations", "MachinesOperations", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_agent_pools_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_agent_pools_operations.py similarity index 87% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_agent_pools_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_agent_pools_operations.py index 105b291dc8e..203a6ca90b7 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_agent_pools_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_agent_pools_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +6,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. 
# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, Type, TypeVar, Union, cast, overload +from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -31,14 +32,12 @@ from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -50,14 +49,14 @@ def build_abort_latest_operation_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclusters/{resourceName}/agentPools/{agentPoolName}/abort", - ) # pylint: 
disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -93,14 +92,14 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -133,14 +132,14 @@ def build_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -171,12 +170,19 @@ def build_get_request( def build_create_or_update_request( - resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any + resource_group_name: str, + resource_name: str, + 
agent_pool_name: str, + subscription_id: str, + *, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -184,7 +190,7 @@ def build_create_or_update_request( _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -209,6 +215,10 @@ def build_create_or_update_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -217,19 +227,26 @@ def build_create_or_update_request( def build_delete_request( - resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + subscription_id: str, + *, + ignore_pod_disruption_budget: Optional[bool] = None, + if_match: Optional[str] = None, + **kwargs: Any 
) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -252,8 +269,14 @@ def build_delete_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if ignore_pod_disruption_budget is not None: + _params["ignore-pod-disruption-budget"] = _SERIALIZER.query( + "ignore_pod_disruption_budget", ignore_pod_disruption_budget, "bool" + ) # Construct headers + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) @@ -265,14 +288,14 @@ def build_get_upgrade_profile_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -308,7 +331,7 @@ def build_delete_machines_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -316,7 +339,7 @@ def build_delete_machines_request( _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/deleteMachines", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -354,14 +377,14 @@ def build_get_available_agent_pool_versions_request( # pylint: disable=name-too _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -394,14 +417,14 @@ def build_upgrade_node_image_version_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -437,24 +460,23 @@ class AgentPoolsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`agent_pools` attribute. 
""" models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") def _abort_latest_operation_initial( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -465,7 +487,7 @@ def _abort_latest_operation_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_abort_latest_operation_request( @@ -493,7 +515,11 @@ def _abort_latest_operation_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, 
error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -533,7 +559,7 @@ def begin_abort_latest_operation( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -574,7 +600,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore @distributed_trace - def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> Iterable["_models.AgentPool"]: + def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> ItemPaged["_models.AgentPool"]: """Gets a list of agent pools in the specified managed cluster. Gets a list of agent pools in the specified managed cluster. @@ -585,16 +611,16 @@ def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> I :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: An iterator like instance of either AgentPool or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.AgentPoolListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -624,7 +650,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -650,7 +676,11 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -672,10 +702,10 @@ def get( :param agent_pool_name: The name of the agent pool. Required. 
:type agent_pool_name: str :return: AgentPool or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool + :rtype: ~azure.mgmt.containerservice.models.AgentPool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -686,7 +716,7 @@ def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None) _request = build_get_request( @@ -709,7 +739,11 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("AgentPool", pipeline_response.http_response) @@ -724,9 +758,11 @@ def _create_or_update_initial( resource_name: str, agent_pool_name: str, parameters: Union[_models.AgentPool, IO[bytes]], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -737,7 +773,7 @@ def _create_or_update_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) @@ -754,6 +790,8 @@ def _create_or_update_initial( resource_name=resource_name, agent_pool_name=agent_pool_name, subscription_id=self._config.subscription_id, + if_match=if_match, + if_none_match=if_none_match, api_version=api_version, content_type=content_type, json=_json, @@ -777,7 +815,11 @@ def _create_or_update_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) @@ -793,6 +835,8 @@ def begin_create_or_update( resource_name: str, agent_pool_name: str, parameters: _models.AgentPool, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -809,13 +853,18 @@ def begin_create_or_update( :param agent_pool_name: The name of the agent pool. Required. :type agent_pool_name: str :param parameters: The agent pool to create or update. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool + :type parameters: ~azure.mgmt.containerservice.models.AgentPool + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. 
+ :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: An instance of LROPoller that returns either AgentPool or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -826,6 +875,8 @@ def begin_create_or_update( resource_name: str, agent_pool_name: str, parameters: IO[bytes], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -843,12 +894,17 @@ def begin_create_or_update( :type agent_pool_name: str :param parameters: The agent pool to create or update. Required. :type parameters: IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. + :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of LROPoller that returns either AgentPool or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -859,6 +915,8 @@ def begin_create_or_update( resource_name: str, agent_pool_name: str, parameters: Union[_models.AgentPool, IO[bytes]], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, **kwargs: Any ) -> LROPoller[_models.AgentPool]: """Creates or updates an agent pool in the specified managed cluster. @@ -874,16 +932,21 @@ def begin_create_or_update( :type agent_pool_name: str :param parameters: The agent pool to create or update. Is either a AgentPool type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPool or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.AgentPool or IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. 
+ :type if_none_match: str :return: An instance of LROPoller that returns either AgentPool or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) @@ -895,6 +958,8 @@ def begin_create_or_update( resource_name=resource_name, agent_pool_name=agent_pool_name, parameters=parameters, + if_match=if_match, + if_none_match=if_none_match, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, @@ -929,9 +994,15 @@ def get_long_running_output(pipeline_response): ) def _delete_initial( - self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + ignore_pod_disruption_budget: Optional[bool] = None, + if_match: Optional[str] = None, + **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -942,7 +1013,7 @@ def _delete_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or 
"2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_delete_request( @@ -950,6 +1021,8 @@ def _delete_initial( resource_name=resource_name, agent_pool_name=agent_pool_name, subscription_id=self._config.subscription_id, + ignore_pod_disruption_budget=ignore_pod_disruption_budget, + if_match=if_match, api_version=api_version, headers=_headers, params=_params, @@ -970,7 +1043,11 @@ def _delete_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -985,7 +1062,13 @@ def _delete_initial( @distributed_trace def begin_delete( - self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any + self, + resource_group_name: str, + resource_name: str, + agent_pool_name: str, + ignore_pod_disruption_budget: Optional[bool] = None, + if_match: Optional[str] = None, + **kwargs: Any ) -> LROPoller[None]: """Deletes an agent pool in the specified managed cluster. @@ -998,6 +1081,12 @@ def begin_delete( :type resource_name: str :param agent_pool_name: The name of the agent pool. Required. :type agent_pool_name: str + :param ignore_pod_disruption_budget: ignore-pod-disruption-budget=true to delete those pods on + a node without considering Pod Disruption Budget. Default value is None. + :type ignore_pod_disruption_budget: bool + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. 
+ :type if_match: str :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: @@ -1005,7 +1094,7 @@ def begin_delete( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -1015,6 +1104,8 @@ def begin_delete( resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, + ignore_pod_disruption_budget=ignore_pod_disruption_budget, + if_match=if_match, api_version=api_version, cls=lambda x, y, z: x, headers=_headers, @@ -1059,10 +1150,10 @@ def get_upgrade_profile( :param agent_pool_name: The name of the agent pool. Required. 
:type agent_pool_name: str :return: AgentPoolUpgradeProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolUpgradeProfile + :rtype: ~azure.mgmt.containerservice.models.AgentPoolUpgradeProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1073,7 +1164,7 @@ def get_upgrade_profile( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.AgentPoolUpgradeProfile] = kwargs.pop("cls", None) _request = build_get_upgrade_profile_request( @@ -1096,7 +1187,11 @@ def get_upgrade_profile( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("AgentPoolUpgradeProfile", pipeline_response.http_response) @@ -1113,7 +1208,7 @@ def _delete_machines_initial( machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]], **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1124,7 +1219,7 @@ def _delete_machines_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or 
{}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) @@ -1164,7 +1259,10 @@ def _delete_machines_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} @@ -1200,8 +1298,7 @@ def begin_delete_machines( :param agent_pool_name: The name of the agent pool. Required. :type agent_pool_name: str :param machines: A list of machines from the agent pool to be deleted. Required. - :type machines: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolDeleteMachinesParameter + :type machines: ~azure.mgmt.containerservice.models.AgentPoolDeleteMachinesParameter :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1264,8 +1361,8 @@ def begin_delete_machines( :type agent_pool_name: str :param machines: A list of machines from the agent pool to be deleted. Is either a AgentPoolDeleteMachinesParameter type or a IO[bytes] type. Required. 
- :type machines: - ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolDeleteMachinesParameter or IO[bytes] + :type machines: ~azure.mgmt.containerservice.models.AgentPoolDeleteMachinesParameter or + IO[bytes] :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: @@ -1273,7 +1370,7 @@ def begin_delete_machines( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) @@ -1330,10 +1427,10 @@ def get_available_agent_pool_versions( :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: AgentPoolAvailableVersions or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.AgentPoolAvailableVersions + :rtype: ~azure.mgmt.containerservice.models.AgentPoolAvailableVersions :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1344,7 +1441,7 @@ def get_available_agent_pool_versions( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.AgentPoolAvailableVersions] = kwargs.pop("cls", None) _request = build_get_available_agent_pool_versions_request( @@ -1366,7 +1463,11 @@ def get_available_agent_pool_versions( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("AgentPoolAvailableVersions", pipeline_response.http_response) @@ -1378,7 +1479,7 @@ def get_available_agent_pool_versions( def _upgrade_node_image_version_initial( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1389,7 +1490,7 @@ def _upgrade_node_image_version_initial( 
_headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_upgrade_node_image_version_request( @@ -1417,7 +1518,11 @@ def _upgrade_node_image_version_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -1450,14 +1555,13 @@ def begin_upgrade_node_image_version( :param agent_pool_name: The name of the agent pool. Required. 
:type agent_pool_name: str :return: An instance of LROPoller that returns either AgentPool or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.AgentPool] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_machines_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_machines_operations.py similarity index 85% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_machines_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_machines_operations.py index 0e8b467df4d..48bbc9dc786 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_machines_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_machines_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -6,10 +6,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -import sys -from typing import Any, Callable, Dict, Iterable, Optional, Type, TypeVar +from collections.abc import MutableMapping +from typing import Any, Callable, Optional, TypeVar import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -26,14 +27,12 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -45,14 +44,14 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/machines", - ) # pylint: disable=line-too-long + ) 
path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -93,14 +92,14 @@ def build_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/machines/{machineName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -139,24 +138,23 @@ class MachinesOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`machines` attribute. 
""" models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any - ) -> Iterable["_models.Machine"]: + ) -> ItemPaged["_models.Machine"]: """Gets a list of machines in the specified agent pool. Gets a list of machines in the specified agent pool. @@ -169,16 +167,16 @@ def list( :param agent_pool_name: The name of the agent pool. Required. 
:type agent_pool_name: str :return: An iterator like instance of either Machine or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Machine] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.Machine] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MachineListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -209,7 +207,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -235,7 +233,11 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -259,10 +261,10 @@ def get( :param machine_name: host name of the machine. Required. 
:type machine_name: str :return: Machine or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Machine + :rtype: ~azure.mgmt.containerservice.models.Machine :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -273,7 +275,7 @@ def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.Machine] = kwargs.pop("cls", None) _request = build_get_request( @@ -297,7 +299,11 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("Machine", pipeline_response.http_response) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_maintenance_configurations_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_maintenance_configurations_operations.py similarity index 85% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_maintenance_configurations_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_maintenance_configurations_operations.py index 
0f092d0fda9..5a1763a2421 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_maintenance_configurations_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_maintenance_configurations_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +6,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Iterable, Optional, Type, TypeVar, Union, overload +from typing import Any, Callable, IO, Optional, TypeVar, Union, overload import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -27,14 +28,12 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from .. 
import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -46,14 +45,14 @@ def build_list_by_managed_cluster_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -86,14 +85,14 @@ def build_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -127,7 +126,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -135,7 +134,7 @@ def build_create_or_update_request( _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -171,14 +170,14 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}", - ) # pylint: disable=line-too-long + ) 
path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -212,24 +211,23 @@ class MaintenanceConfigurationsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`maintenance_configurations` attribute. """ models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list_by_managed_cluster( self, resource_group_name: str, resource_name: str, **kwargs: Any - ) -> Iterable["_models.MaintenanceConfiguration"]: + ) -> ItemPaged["_models.MaintenanceConfiguration"]: """Gets a list of maintenance configurations in the specified managed cluster. Gets a list of maintenance configurations in the specified managed cluster. 
@@ -242,16 +240,16 @@ def list_by_managed_cluster( :return: An iterator like instance of either MaintenanceConfiguration or the result of cls(response) :rtype: - ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration] + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.MaintenanceConfiguration] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MaintenanceConfigurationListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -281,7 +279,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -307,7 +305,11 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -326,13 +328,14 @@ def get( :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str - :param config_name: The name of the maintenance configuration. Required. + :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. :type config_name: str :return: MaintenanceConfiguration or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -343,7 +346,7 @@ def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None) _request = build_get_request( @@ -366,7 +369,11 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response) @@ -395,15 +402,16 @@ def create_or_update( :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str - :param config_name: The name of the maintenance configuration. Required. 
+ :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. :type config_name: str :param parameters: The maintenance configuration to create or update. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :type parameters: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: MaintenanceConfiguration or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :raises ~azure.core.exceptions.HttpResponseError: """ @@ -427,7 +435,8 @@ def create_or_update( :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str - :param config_name: The name of the maintenance configuration. Required. + :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. :type config_name: str :param parameters: The maintenance configuration to create or update. Required. :type parameters: IO[bytes] @@ -435,7 +444,7 @@ def create_or_update( Default value is "application/json". :paramtype content_type: str :return: MaintenanceConfiguration or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :raises ~azure.core.exceptions.HttpResponseError: """ @@ -457,17 +466,17 @@ def create_or_update( :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str - :param config_name: The name of the maintenance configuration. Required. + :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. :type config_name: str :param parameters: The maintenance configuration to create or update. Is either a MaintenanceConfiguration type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration or - IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.MaintenanceConfiguration or IO[bytes] :return: MaintenanceConfiguration or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MaintenanceConfiguration + :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -478,7 +487,7 @@ def create_or_update( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None) @@ -513,7 +522,11 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + 
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response) @@ -535,13 +548,14 @@ def delete( # pylint: disable=inconsistent-return-statements :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str - :param config_name: The name of the maintenance configuration. Required. + :param config_name: The name of the maintenance configuration. Supported values are 'default', + 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required. :type config_name: str :return: None or the result of cls(response) :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -552,7 +566,7 @@ def delete( # pylint: disable=inconsistent-return-statements _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) _request = build_delete_request( @@ -575,7 +589,11 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) # type: ignore diff --git 
a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_managed_clusters_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_managed_clusters_operations.py similarity index 89% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_managed_clusters_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_managed_clusters_operations.py index 72647cc95c1..e32ef0b8666 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_managed_clusters_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_managed_clusters_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +6,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, Type, TypeVar, Union, cast, overload +from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -31,14 +32,12 @@ from azure.mgmt.core.polling.arm_polling import ARMPolling from .. 
import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -48,14 +47,14 @@ def build_list_kubernetes_versions_request(location: str, subscription_id: str, _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/kubernetesVersions", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "location": _SERIALIZER.url("location", location, "str", min_length=1), @@ -76,7 +75,7 @@ def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -102,14 +101,14 @@ 
def build_list_by_resource_group_request(resource_group_name: str, subscription_ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -134,14 +133,14 @@ def build_get_upgrade_profile_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -174,14 +173,14 @@ def build_get_access_profile_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = 
_headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -220,14 +219,14 @@ def build_list_cluster_admin_credentials_request( # pylint: disable=name-too-lo _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -268,14 +267,14 @@ def build_list_cluster_user_credentials_request( # pylint: disable=name-too-lon _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential", - ) # 
pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -317,14 +316,14 @@ def build_list_cluster_monitoring_user_credentials_request( # pylint: disable=n _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -357,14 +356,14 @@ def build_get_request(resource_group_name: str, resource_name: str, subscription _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -392,12 +391,18 @@ def build_get_request(resource_group_name: str, resource_name: str, subscription def 
build_create_or_update_request( - resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any + resource_group_name: str, + resource_name: str, + subscription_id: str, + *, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -405,7 +410,7 @@ def build_create_or_update_request( _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -427,6 +432,10 @@ def build_create_or_update_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -435,12 +444,12 @@ def build_create_or_update_request( def build_update_tags_request( - resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any + resource_group_name: str, resource_name: str, subscription_id: str, *, if_match: Optional[str] = None, 
**kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -448,7 +457,7 @@ def build_update_tags_request( _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -470,6 +479,8 @@ def build_update_tags_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -478,19 +489,19 @@ def build_update_tags_request( def build_delete_request( - resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any + resource_group_name: str, resource_name: str, subscription_id: str, *, if_match: Optional[str] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = 
kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -512,6 +523,8 @@ def build_delete_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) @@ -523,7 +536,7 @@ def build_reset_service_principal_profile_request( # pylint: disable=name-too-l _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -531,7 +544,7 @@ def build_reset_service_principal_profile_request( # pylint: disable=name-too-l _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -566,7 +579,7 @@ def build_reset_aad_profile_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -574,7 +587,7 @@ def build_reset_aad_profile_request( _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -609,14 +622,14 @@ def build_rotate_cluster_certificates_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -649,14 +662,14 @@ def build_abort_latest_operation_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", 
"application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclusters/{resourceName}/abort", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -689,14 +702,14 @@ def build_rotate_service_account_signing_keys_request( # pylint: disable=name-t _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateServiceAccountSigningKeys", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -729,14 +742,14 @@ def build_stop_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 
"str"), "resourceGroupName": _SERIALIZER.url( @@ -769,14 +782,14 @@ def build_start_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -809,7 +822,7 @@ def build_run_command_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -817,7 +830,7 @@ def build_run_command_request( _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/runCommand", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -852,14 +865,14 @@ def build_get_command_result_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/commandResults/{commandId}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -893,14 +906,14 @@ def build_list_outbound_network_dependencies_endpoints_request( # pylint: disab _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/outboundNetworkDependenciesEndpoints", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -933,14 +946,14 @@ def build_list_mesh_revision_profiles_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", 
"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/meshRevisionProfiles", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "location": _SERIALIZER.url("location", location, "str", min_length=1), @@ -963,14 +976,14 @@ def build_get_mesh_revision_profile_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/meshRevisionProfiles/{mode}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "location": _SERIALIZER.url("location", location, "str", min_length=1), @@ -1001,14 +1014,14 @@ def build_list_mesh_upgrade_profiles_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/meshUpgradeProfiles", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -1041,14 +1054,14 @@ def 
build_get_mesh_upgrade_profile_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/meshUpgradeProfiles/{mode}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -1089,19 +1102,18 @@ class ManagedClustersOperations: # pylint: disable=too-many-public-methods **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`managed_clusters` attribute. 
""" models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.KubernetesVersionListResult: @@ -1113,10 +1125,10 @@ def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.Kube :param location: The name of the Azure region. Required. 
:type location: str :return: KubernetesVersionListResult or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.KubernetesVersionListResult + :rtype: ~azure.mgmt.containerservice.models.KubernetesVersionListResult :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1127,7 +1139,7 @@ def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.Kube _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.KubernetesVersionListResult] = kwargs.pop("cls", None) _request = build_list_kubernetes_versions_request( @@ -1148,7 +1160,11 @@ def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.Kube if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("KubernetesVersionListResult", pipeline_response.http_response) @@ -1158,23 +1174,22 @@ def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.Kube return deserialized # type: ignore @distributed_trace - def list(self, **kwargs: Any) -> Iterable["_models.ManagedCluster"]: + def list(self, **kwargs: Any) -> ItemPaged["_models.ManagedCluster"]: """Gets a list of managed clusters in the specified subscription. 
Gets a list of managed clusters in the specified subscription. :return: An iterator like instance of either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1202,7 +1217,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -1228,14 +1243,18 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data) @distributed_trace - def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.ManagedCluster"]: + def 
list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> ItemPaged["_models.ManagedCluster"]: """Lists managed clusters in the specified subscription and resource group. Lists managed clusters in the specified subscription and resource group. @@ -1244,17 +1263,16 @@ def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Ite Required. :type resource_group_name: str :return: An iterator like instance of either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1283,7 +1301,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -1309,7 +1327,11 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + 
_models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -1329,10 +1351,10 @@ def get_upgrade_profile( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :return: ManagedClusterUpgradeProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterUpgradeProfile + :rtype: ~azure.mgmt.containerservice.models.ManagedClusterUpgradeProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1343,7 +1365,7 @@ def get_upgrade_profile( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedClusterUpgradeProfile] = kwargs.pop("cls", None) _request = build_get_upgrade_profile_request( @@ -1365,7 +1387,11 @@ def get_upgrade_profile( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("ManagedClusterUpgradeProfile", pipeline_response.http_response) @@ -1393,10 +1419,10 @@ def get_access_profile( :param role_name: The name of the role for managed cluster accessProfile resource. Required. 
:type role_name: str :return: ManagedClusterAccessProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAccessProfile + :rtype: ~azure.mgmt.containerservice.models.ManagedClusterAccessProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1407,7 +1433,7 @@ def get_access_profile( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedClusterAccessProfile] = kwargs.pop("cls", None) _request = build_get_access_profile_request( @@ -1430,7 +1456,11 @@ def get_access_profile( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("ManagedClusterAccessProfile", pipeline_response.http_response) @@ -1455,10 +1485,10 @@ def list_cluster_admin_credentials( :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. 
:type server_fqdn: str :return: CredentialResults or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :rtype: ~azure.mgmt.containerservice.models.CredentialResults :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1469,7 +1499,7 @@ def list_cluster_admin_credentials( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) _request = build_list_cluster_admin_credentials_request( @@ -1492,7 +1522,11 @@ def list_cluster_admin_credentials( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) @@ -1525,12 +1559,12 @@ def list_cluster_user_credentials( 'azure' will return azure auth-provider kubeconfig; format 'exec' will return exec format kubeconfig, which requires kubelogin binary in the path. Known values are: "azure", "exec", and "exec". Default value is None. 
- :type format: str or ~azure.mgmt.containerservice.v2024_07_01.models.Format + :type format: str or ~azure.mgmt.containerservice.models.Format :return: CredentialResults or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :rtype: ~azure.mgmt.containerservice.models.CredentialResults :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1541,7 +1575,7 @@ def list_cluster_user_credentials( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) _request = build_list_cluster_user_credentials_request( @@ -1565,7 +1599,11 @@ def list_cluster_user_credentials( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) @@ -1590,10 +1628,10 @@ def list_cluster_monitoring_user_credentials( :param server_fqdn: server fqdn type for credentials to be returned. Default value is None. 
:type server_fqdn: str :return: CredentialResults or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.CredentialResults + :rtype: ~azure.mgmt.containerservice.models.CredentialResults :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1604,7 +1642,7 @@ def list_cluster_monitoring_user_credentials( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) _request = build_list_cluster_monitoring_user_credentials_request( @@ -1627,7 +1665,11 @@ def list_cluster_monitoring_user_credentials( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) @@ -1648,10 +1690,10 @@ def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _m :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: ManagedCluster or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster + :rtype: ~azure.mgmt.containerservice.models.ManagedCluster :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1662,7 +1704,7 @@ def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _m _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) _request = build_get_request( @@ -1684,7 +1726,11 @@ def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _m if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response) @@ -1698,9 +1744,11 @@ def _create_or_update_initial( resource_group_name: str, resource_name: str, parameters: Union[_models.ManagedCluster, IO[bytes]], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1711,7 +1759,7 @@ 
def _create_or_update_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) @@ -1727,6 +1775,8 @@ def _create_or_update_initial( resource_group_name=resource_group_name, resource_name=resource_name, subscription_id=self._config.subscription_id, + if_match=if_match, + if_none_match=if_none_match, api_version=api_version, content_type=content_type, json=_json, @@ -1750,7 +1800,11 @@ def _create_or_update_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) @@ -1765,6 +1819,8 @@ def begin_create_or_update( resource_group_name: str, resource_name: str, parameters: _models.ManagedCluster, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -1779,14 +1835,19 @@ def begin_create_or_update( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: The managed cluster to create or update. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster + :type parameters: ~azure.mgmt.containerservice.models.ManagedCluster + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. + :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1796,6 +1857,8 @@ def begin_create_or_update( resource_group_name: str, resource_name: str, parameters: IO[bytes], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -1811,13 +1874,18 @@ def begin_create_or_update( :type resource_name: str :param parameters: The managed cluster to create or update. Required. :type parameters: IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. + :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1827,6 +1895,8 @@ def begin_create_or_update( resource_group_name: str, resource_name: str, parameters: Union[_models.ManagedCluster, IO[bytes]], + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, **kwargs: Any ) -> LROPoller[_models.ManagedCluster]: """Creates or updates a managed cluster. @@ -1840,17 +1910,22 @@ def begin_create_or_update( :type resource_name: str :param parameters: The managed cluster to create or update. Is either a ManagedCluster type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.ManagedCluster or IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str + :param if_none_match: The request should only proceed if no entity matches this string. Default + value is None. 
+ :type if_none_match: str :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) @@ -1861,6 +1936,8 @@ def begin_create_or_update( resource_group_name=resource_group_name, resource_name=resource_name, parameters=parameters, + if_match=if_match, + if_none_match=if_none_match, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, @@ -1899,9 +1976,10 @@ def _update_tags_initial( resource_group_name: str, resource_name: str, parameters: Union[_models.TagsObject, IO[bytes]], + if_match: Optional[str] = None, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1912,7 +1990,7 @@ def _update_tags_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) 
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) @@ -1928,6 +2006,7 @@ def _update_tags_initial( resource_group_name=resource_group_name, resource_name=resource_name, subscription_id=self._config.subscription_id, + if_match=if_match, api_version=api_version, content_type=content_type, json=_json, @@ -1951,7 +2030,11 @@ def _update_tags_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) @@ -1966,6 +2049,7 @@ def begin_update_tags( resource_group_name: str, resource_name: str, parameters: _models.TagsObject, + if_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -1980,14 +2064,16 @@ def begin_update_tags( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject + :type parameters: ~azure.mgmt.containerservice.models.TagsObject + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1997,6 +2083,7 @@ def begin_update_tags( resource_group_name: str, resource_name: str, parameters: IO[bytes], + if_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any @@ -2012,13 +2099,15 @@ def begin_update_tags( :type resource_name: str :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required. :type parameters: IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2028,6 +2117,7 @@ def begin_update_tags( resource_group_name: str, resource_name: str, parameters: Union[_models.TagsObject, IO[bytes]], + if_match: Optional[str] = None, **kwargs: Any ) -> LROPoller[_models.ManagedCluster]: """Updates tags on a managed cluster. @@ -2041,17 +2131,19 @@ def begin_update_tags( :type resource_name: str :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Is either a TagsObject type or a IO[bytes] type. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes] + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.ManagedCluster] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) @@ -2062,6 +2154,7 @@ def begin_update_tags( resource_group_name=resource_group_name, resource_name=resource_name, parameters=parameters, + if_match=if_match, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, @@ -2095,8 +2188,10 @@ def get_long_running_output(pipeline_response): self._client, raw_result, get_long_running_output, polling_method # type: ignore ) - def _delete_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + def _delete_initial( + self, resource_group_name: str, resource_name: str, if_match: Optional[str] = None, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: 
ResourceNotFoundError, 409: ResourceExistsError, @@ -2107,13 +2202,14 @@ def _delete_initial(self, resource_group_name: str, resource_name: str, **kwargs _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_delete_request( resource_group_name=resource_group_name, resource_name=resource_name, subscription_id=self._config.subscription_id, + if_match=if_match, api_version=api_version, headers=_headers, params=_params, @@ -2134,7 +2230,11 @@ def _delete_initial(self, resource_group_name: str, resource_name: str, **kwargs except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -2148,7 +2248,9 @@ def _delete_initial(self, resource_group_name: str, resource_name: str, **kwargs return deserialized # type: ignore @distributed_trace - def begin_delete(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> LROPoller[None]: + def begin_delete( + self, resource_group_name: str, resource_name: str, if_match: Optional[str] = None, **kwargs: Any + ) -> LROPoller[None]: """Deletes a managed cluster. Deletes a managed cluster. @@ -2158,6 +2260,9 @@ def begin_delete(self, resource_group_name: str, resource_name: str, **kwargs: A :type resource_group_name: str :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str + :param if_match: The request should only proceed if an entity matches this string. Default + value is None. + :type if_match: str :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: @@ -2165,7 +2270,7 @@ def begin_delete(self, resource_group_name: str, resource_name: str, **kwargs: A _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -2174,6 +2279,7 @@ def begin_delete(self, resource_group_name: str, resource_name: str, **kwargs: A raw_result = self._delete_initial( resource_group_name=resource_group_name, resource_name=resource_name, + if_match=if_match, api_version=api_version, cls=lambda x, y, z: x, headers=_headers, @@ -2209,7 +2315,7 @@ def _reset_service_principal_profile_initial( parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]], **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2220,7 +2326,7 @@ def _reset_service_principal_profile_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", 
self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) @@ -2259,7 +2365,11 @@ def _reset_service_principal_profile_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -2292,8 +2402,7 @@ def begin_reset_service_principal_profile( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: The service principal profile to set on the managed cluster. Required. - :type parameters: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile + :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -2350,8 +2459,7 @@ def begin_reset_service_principal_profile( :type resource_name: str :param parameters: The service principal profile to set on the managed cluster. Is either a ManagedClusterServicePrincipalProfile type or a IO[bytes] type. Required. 
- :type parameters: - ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterServicePrincipalProfile or + :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile or IO[bytes] :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] @@ -2360,7 +2468,7 @@ def begin_reset_service_principal_profile( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) @@ -2409,7 +2517,7 @@ def _reset_aad_profile_initial( parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]], **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2420,7 +2528,7 @@ def _reset_aad_profile_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) @@ -2459,7 +2567,11 @@ def _reset_aad_profile_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -2494,7 +2606,7 @@ def begin_reset_aad_profile( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: The AAD profile to set on the Managed Cluster. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile + :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -2555,8 +2667,7 @@ def begin_reset_aad_profile( :type resource_name: str :param parameters: The AAD profile to set on the Managed Cluster. Is either a ManagedClusterAADProfile type or a IO[bytes] type. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.ManagedClusterAADProfile or - IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile or IO[bytes] :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: @@ -2564,7 +2675,7 @@ def begin_reset_aad_profile( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) @@ -2609,7 +2720,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- def _rotate_cluster_certificates_initial( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2620,7 +2731,7 @@ def _rotate_cluster_certificates_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_rotate_cluster_certificates_request( @@ -2647,7 +2758,11 @@ def _rotate_cluster_certificates_initial( except (StreamConsumedError, StreamClosedError): 
pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -2681,7 +2796,7 @@ def begin_rotate_cluster_certificates( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -2723,7 +2838,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- def _abort_latest_operation_initial( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2734,7 +2849,7 @@ def _abort_latest_operation_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_abort_latest_operation_request( @@ -2761,7 +2876,11 @@ def _abort_latest_operation_initial( except (StreamConsumedError, StreamClosedError): pass 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -2799,7 +2918,7 @@ def begin_abort_latest_operation( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -2841,7 +2960,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- def _rotate_service_account_signing_keys_initial( # pylint: disable=name-too-long self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2852,7 +2971,7 @@ def _rotate_service_account_signing_keys_initial( # pylint: disable=name-too-lo _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_rotate_service_account_signing_keys_request( @@ -2879,7 +2998,11 @@ def 
_rotate_service_account_signing_keys_initial( # pylint: disable=name-too-lo except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -2912,7 +3035,7 @@ def begin_rotate_service_account_signing_keys( # pylint: disable=name-too-long _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -2952,7 +3075,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore def _stop_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2963,7 +3086,7 @@ def _stop_initial(self, resource_group_name: str, resource_name: str, **kwargs: _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_stop_request( @@ -2990,7 +3113,11 @@ def _stop_initial(self, resource_group_name: str, resource_name: str, **kwargs: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -3025,7 +3152,7 @@ def begin_stop(self, resource_group_name: str, resource_name: str, **kwargs: Any _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -3065,7 +3192,7 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent- return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore def _start_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3076,7 +3203,7 @@ def _start_initial(self, resource_group_name: str, resource_name: str, **kwargs: _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) 
or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_start_request( @@ -3103,7 +3230,11 @@ def _start_initial(self, resource_group_name: str, resource_name: str, **kwargs: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -3135,7 +3266,7 @@ def begin_start(self, resource_group_name: str, resource_name: str, **kwargs: An _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) @@ -3181,7 +3312,7 @@ def _run_command_initial( request_payload: Union[_models.RunCommandRequest, IO[bytes]], **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3192,7 +3323,7 @@ def _run_command_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) @@ -3231,7 +3362,11 @@ def _run_command_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 202: @@ -3266,14 +3401,13 @@ def begin_run_command( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param request_payload: The run command request. Required. - :type request_payload: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandRequest + :type request_payload: ~azure.mgmt.containerservice.models.RunCommandRequest :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of LROPoller that returns either RunCommandResult or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.RunCommandResult] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -3305,8 +3439,7 @@ def begin_run_command( :paramtype content_type: str :return: An instance of LROPoller that returns either RunCommandResult or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.RunCommandResult] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -3331,18 +3464,16 @@ def begin_run_command( :type resource_name: str :param request_payload: The run command request. Is either a RunCommandRequest type or a IO[bytes] type. Required. 
- :type request_payload: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandRequest or - IO[bytes] + :type request_payload: ~azure.mgmt.containerservice.models.RunCommandRequest or IO[bytes] :return: An instance of LROPoller that returns either RunCommandResult or the result of cls(response) - :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult] + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.RunCommandResult] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.RunCommandResult] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) @@ -3404,10 +3535,10 @@ def get_command_result( :param command_id: Id of the command. Required. 
:type command_id: str :return: RunCommandResult or None or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.RunCommandResult or None + :rtype: ~azure.mgmt.containerservice.models.RunCommandResult or None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3418,7 +3549,7 @@ def get_command_result( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Optional[_models.RunCommandResult]] = kwargs.pop("cls", None) _request = build_get_command_result_request( @@ -3441,7 +3572,11 @@ def get_command_result( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None response_headers = {} @@ -3459,7 +3594,7 @@ def get_command_result( @distributed_trace def list_outbound_network_dependencies_endpoints( # pylint: disable=name-too-long self, resource_group_name: str, resource_name: str, **kwargs: Any - ) -> Iterable["_models.OutboundEnvironmentEndpoint"]: + ) -> ItemPaged["_models.OutboundEnvironmentEndpoint"]: """Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the specified managed cluster. 
@@ -3474,16 +3609,16 @@ def list_outbound_network_dependencies_endpoints( # pylint: disable=name-too-lo :return: An iterator like instance of either OutboundEnvironmentEndpoint or the result of cls(response) :rtype: - ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.OutboundEnvironmentEndpoint] + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.OutboundEnvironmentEndpoint] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.OutboundEnvironmentEndpointCollection] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3513,7 +3648,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -3539,14 +3674,18 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data) @distributed_trace - def list_mesh_revision_profiles(self, 
location: str, **kwargs: Any) -> Iterable["_models.MeshRevisionProfile"]: + def list_mesh_revision_profiles(self, location: str, **kwargs: Any) -> ItemPaged["_models.MeshRevisionProfile"]: """Lists mesh revision profiles for all meshes in the specified location. Contains extra metadata on each revision, including supported revisions, cluster compatibility @@ -3555,17 +3694,16 @@ def list_mesh_revision_profiles(self, location: str, **kwargs: Any) -> Iterable[ :param location: The name of the Azure region. Required. :type location: str :return: An iterator like instance of either MeshRevisionProfile or the result of cls(response) - :rtype: - ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.MeshRevisionProfile] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MeshRevisionProfileList] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3594,7 +3732,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -3620,7 +3758,11 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, 
error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -3638,10 +3780,10 @@ def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: Any) -> :param mode: The mode of the mesh. Required. :type mode: str :return: MeshRevisionProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MeshRevisionProfile + :rtype: ~azure.mgmt.containerservice.models.MeshRevisionProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3652,7 +3794,7 @@ def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: Any) -> _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MeshRevisionProfile] = kwargs.pop("cls", None) _request = build_get_mesh_revision_profile_request( @@ -3674,7 +3816,11 @@ def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: Any) -> if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("MeshRevisionProfile", 
pipeline_response.http_response) @@ -3686,7 +3832,7 @@ def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: Any) -> @distributed_trace def list_mesh_upgrade_profiles( self, resource_group_name: str, resource_name: str, **kwargs: Any - ) -> Iterable["_models.MeshUpgradeProfile"]: + ) -> ItemPaged["_models.MeshUpgradeProfile"]: """Lists available upgrades for all service meshes in a specific cluster. Lists available upgrades for all service meshes in a specific cluster. @@ -3697,17 +3843,16 @@ def list_mesh_upgrade_profiles( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :return: An iterator like instance of either MeshUpgradeProfile or the result of cls(response) - :rtype: - ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.MeshUpgradeProfile] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MeshUpgradeProfileList] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3737,7 +3882,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -3763,7 +3908,11 @@ def get_next(next_link=None): 
if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -3785,10 +3934,10 @@ def get_mesh_upgrade_profile( :param mode: The mode of the mesh. Required. :type mode: str :return: MeshUpgradeProfile or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.MeshUpgradeProfile + :rtype: ~azure.mgmt.containerservice.models.MeshUpgradeProfile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3799,7 +3948,7 @@ def get_mesh_upgrade_profile( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.MeshUpgradeProfile] = kwargs.pop("cls", None) _request = build_get_mesh_upgrade_profile_request( @@ -3822,7 +3971,11 @@ def get_mesh_upgrade_profile( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("MeshUpgradeProfile", pipeline_response.http_response) diff --git 
a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_managed_namespaces_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_managed_namespaces_operations.py new file mode 100644 index 00000000000..8dfe5b99e86 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_managed_namespaces_operations.py @@ -0,0 +1,1086 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from collections.abc import MutableMapping +from io import IOBase +from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core import PipelineClient +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. 
import models as _models +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_list_by_managed_cluster_request( + resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces", + ) + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_request( + resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}", + ) + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "managedNamespaceName": _SERIALIZER.url( + "managed_namespace_name", + managed_namespace_name, + "str", + max_length=63, + min_length=1, + pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_or_update_request( + resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = 
_headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}", + ) + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "managedNamespaceName": _SERIALIZER.url( + "managed_namespace_name", + managed_namespace_name, + "str", + max_length=63, + min_length=1, + pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}", + ) + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "managedNamespaceName": _SERIALIZER.url( + "managed_namespace_name", + managed_namespace_name, + "str", + max_length=63, + min_length=1, + pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_update_request( + resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}", + ) + path_format_arguments = { + "subscriptionId": 
_SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + "resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "managedNamespaceName": _SERIALIZER.url( + "managed_namespace_name", + managed_namespace_name, + "str", + max_length=63, + min_length=1, + pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_credential_request( + resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}/listCredential", + ) + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url( + "resource_group_name", resource_group_name, "str", max_length=90, min_length=1 + ), + 
"resourceName": _SERIALIZER.url( + "resource_name", + resource_name, + "str", + max_length=63, + min_length=1, + pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$", + ), + "managedNamespaceName": _SERIALIZER.url( + "managed_namespace_name", + managed_namespace_name, + "str", + max_length=63, + min_length=1, + pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?", + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +class ManagedNamespacesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s + :attr:`managed_namespaces` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list_by_managed_cluster( + self, resource_group_name: str, resource_name: str, **kwargs: Any + ) -> ItemPaged["_models.ManagedNamespace"]: + """Gets a list of managed namespaces in the specified managed cluster. + + Gets a list of managed namespaces in the specified managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :return: An iterator like instance of either ManagedNamespace or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.ManagedNamespace] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[_models.ManagedNamespaceListResult] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_list_by_managed_cluster_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + _request.url = self._client.format_url(_request.url) + _request.method = "GET" + return _request + + def extract_data(pipeline_response): + deserialized = self._deserialize("ManagedNamespaceListResult", pipeline_response) + list_of_elem = 
deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get( + self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any + ) -> _models.ManagedNamespace: + """Gets the specified namespace of a managed cluster. + + Gets the specified namespace of a managed cluster. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. 
+ :type managed_namespace_name: str + :return: ManagedNamespace or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None) + + _request = build_get_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _create_or_update_initial( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: Union[_models.ManagedNamespace, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + 
error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "ManagedNamespace") + + _request = build_create_or_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + 
response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: _models.ManagedNamespace, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.ManagedNamespace]: + """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: The namespace to create or update. Required. + :type parameters: ~azure.mgmt.containerservice.models.ManagedNamespace + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of LROPoller that returns either ManagedNamespace or the result of + cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedNamespace] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.ManagedNamespace]: + """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: The namespace to create or update. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: An instance of LROPoller that returns either ManagedNamespace or the result of + cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedNamespace] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: Union[_models.ManagedNamespace, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.ManagedNamespace]: + """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + Creates or updates a namespace managed by ARM for the specified managed cluster. Users can + configure aspects like resource quotas, network ingress/egress policies, and more. See + aka.ms/aks/managed-namespaces for more details. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: The namespace to create or update. Is either a ManagedNamespace type or a + IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedNamespace or IO[bytes] + :return: An instance of LROPoller that returns either ManagedNamespace or the result of + cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedNamespace] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + parameters=parameters, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = 
cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.ManagedNamespace].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.ManagedNamespace]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial( + self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_delete_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _decompress = kwargs.pop("decompress", True) + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + 
pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + if response.status_code == 204: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + + deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes a namespace. + + Deletes a namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. 
+ :type managed_namespace_name: str + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @overload + def update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: _models.TagsObject, + *, + content_type: str = "application/json", + **kwargs: 
Any + ) -> _models.ManagedNamespace: + """Updates tags on a managed namespace. + + Updates tags on a managed namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: Parameters supplied to the patch namespace operation, we only support patch + tags for now. Required. + :type parameters: ~azure.mgmt.containerservice.models.TagsObject + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ManagedNamespace or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ManagedNamespace: + """Updates tags on a managed namespace. + + Updates tags on a managed namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: Parameters supplied to the patch namespace operation, we only support patch + tags for now. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: ManagedNamespace or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update( + self, + resource_group_name: str, + resource_name: str, + managed_namespace_name: str, + parameters: Union[_models.TagsObject, IO[bytes]], + **kwargs: Any + ) -> _models.ManagedNamespace: + """Updates tags on a managed namespace. + + Updates tags on a managed namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :param parameters: Parameters supplied to the patch namespace operation, we only support patch + tags for now. Is either a TagsObject type or a IO[bytes] type. Required. 
+ :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes] + :return: ManagedNamespace or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _json = self._serialize.body(parameters, "TagsObject") + + _request = build_update_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, 
error_format=ARMErrorFormat) + + deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_credential( + self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any + ) -> _models.CredentialResults: + """Lists the credentials of a namespace. + + Lists the credentials of a namespace. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param resource_name: The name of the managed cluster resource. Required. + :type resource_name: str + :param managed_namespace_name: The name of the managed namespace. Required. + :type managed_namespace_name: str + :return: CredentialResults or the result of cls(response) + :rtype: ~azure.mgmt.containerservice.models.CredentialResults + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) + cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None) + + _request = build_list_credential_request( + resource_group_name=resource_group_name, + resource_name=resource_name, + managed_namespace_name=managed_namespace_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + 
_request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = self._deserialize("CredentialResults", pipeline_response.http_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_operations.py similarity index 76% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_operations.py index 0245fce11b6..204312518ba 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_operations.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,10 +5,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -import sys -from typing import Any, Callable, Dict, Iterable, Optional, Type, TypeVar +from collections.abc import MutableMapping +from typing import Any, Callable, Optional, TypeVar import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -26,14 +26,12 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -43,7 +41,7 @@ def build_list_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -64,38 +62,36 @@ class Operations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`operations` attribute. 
""" models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list(self, **kwargs: Any) -> Iterable["_models.OperationValue"]: + def list(self, **kwargs: Any) -> ItemPaged["_models.OperationValue"]: """Gets a list of operations. Gets a list of operations. 
:return: An iterator like instance of either OperationValue or the result of cls(response) - :rtype: - ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.OperationValue] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.OperationValue] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -122,7 +118,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -148,7 +144,11 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_patch.py new file mode 100644 index 
00000000000..8bcb627aa47 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_endpoint_connections_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_private_endpoint_connections_operations.py similarity index 88% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_endpoint_connections_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_private_endpoint_connections_operations.py index 8a8b7d422d9..a0e8fc786f9 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_endpoint_connections_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_private_endpoint_connections_operations.py @@ -1,4 +1,4 @@ 
-# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,10 +6,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Iterator, Optional, Type, TypeVar, Union, cast, overload +from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -29,14 +30,12 @@ from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -48,14 +47,14 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) 
accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -92,14 +91,14 @@ def build_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -139,7 +138,7 @@ def build_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -147,7 +146,7 @@ def build_update_request( _url = kwargs.pop( "template_url", 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -189,14 +188,14 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -232,19 +231,18 @@ class PrivateEndpointConnectionsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`private_endpoint_connections` attribute. 
""" models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list( @@ -261,10 +259,10 @@ def list( :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: PrivateEndpointConnectionListResult or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnectionListResult + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnectionListResult :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -275,7 +273,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None) _request = build_list_request( @@ -297,7 +295,11 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response.http_response) @@ -323,10 +325,10 @@ def get( :param private_endpoint_connection_name: The name of the private endpoint connection. Required. 
:type private_endpoint_connection_name: str :return: PrivateEndpointConnection or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -337,7 +339,7 @@ def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) _request = build_get_request( @@ -360,7 +362,11 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response) @@ -392,12 +398,12 @@ def update( :param private_endpoint_connection_name: The name of the private endpoint connection. Required. :type private_endpoint_connection_name: str :param parameters: The updated private endpoint connection. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :type parameters: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
Default value is "application/json". :paramtype content_type: str :return: PrivateEndpointConnection or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :raises ~azure.core.exceptions.HttpResponseError: """ @@ -429,7 +435,7 @@ def update( Default value is "application/json". :paramtype content_type: str :return: PrivateEndpointConnection or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :raises ~azure.core.exceptions.HttpResponseError: """ @@ -455,13 +461,12 @@ def update( :type private_endpoint_connection_name: str :param parameters: The updated private endpoint connection. Is either a PrivateEndpointConnection type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection or - IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.PrivateEndpointConnection or IO[bytes] :return: PrivateEndpointConnection or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateEndpointConnection + :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -472,7 +477,7 @@ def update( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = 
kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) @@ -507,7 +512,11 @@ def update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response) @@ -519,7 +528,7 @@ def update( def _delete_initial( self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -530,7 +539,7 @@ def _delete_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) _request = build_delete_request( @@ -558,7 +567,11 @@ def _delete_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) @@ -589,7 
+602,7 @@ def begin_delete( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_link_resources_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_private_link_resources_operations.py similarity index 79% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_link_resources_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_private_link_resources_operations.py index 8c42443c082..88a56faf5fa 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_private_link_resources_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_private_link_resources_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,9 +6,10 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -import sys -from typing import Any, Callable, Dict, Optional, Type, TypeVar +from collections.abc import MutableMapping +from typing import Any, Callable, Optional, TypeVar +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -24,14 +25,12 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -43,14 +42,14 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -83,19 +82,18 @@ class PrivateLinkResourcesOperations: **DO NOT** instantiate 
this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`private_link_resources` attribute. """ models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list( @@ -112,10 +110,10 @@ def list( :param resource_name: The name of the managed cluster resource. Required. 
:type resource_name: str :return: PrivateLinkResourcesListResult or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResourcesListResult + :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResourcesListResult :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -126,7 +124,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None) _request = build_list_request( @@ -148,7 +146,11 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response.http_response) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_resolve_private_link_service_id_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_resolve_private_link_service_id_operations.py similarity index 83% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_resolve_private_link_service_id_operations.py rename to 
src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_resolve_private_link_service_id_operations.py index 71b08f43f19..f8b8ab7cfd1 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_resolve_private_link_service_id_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_resolve_private_link_service_id_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,10 +6,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload +from typing import Any, Callable, IO, Optional, TypeVar, Union, overload +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -25,14 +26,12 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from .. 
import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -44,7 +43,7 @@ def build_post_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -52,7 +51,7 @@ def build_post_request( _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resolvePrivateLinkServiceId", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -87,19 +86,18 @@ class ResolvePrivateLinkServiceIdOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`resolve_private_link_service_id` attribute. 
""" models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload def post( @@ -121,12 +119,12 @@ def post( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: Parameters required in order to resolve a private link service ID. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :type parameters: ~azure.mgmt.containerservice.models.PrivateLinkResource :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: PrivateLinkResource or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource :raises ~azure.core.exceptions.HttpResponseError: """ @@ -155,7 +153,7 @@ def post( Default value is "application/json". 
:paramtype content_type: str :return: PrivateLinkResource or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource :raises ~azure.core.exceptions.HttpResponseError: """ @@ -178,13 +176,12 @@ def post( :type resource_name: str :param parameters: Parameters required in order to resolve a private link service ID. Is either a PrivateLinkResource type or a IO[bytes] type. Required. - :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource or - IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.PrivateLinkResource or IO[bytes] :return: PrivateLinkResource or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.PrivateLinkResource + :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -195,7 +192,7 @@ def post( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.PrivateLinkResource] = kwargs.pop("cls", None) @@ -229,7 +226,11 @@ def post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateLinkResource", pipeline_response.http_response) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_snapshots_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_snapshots_operations.py similarity index 87% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_snapshots_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_snapshots_operations.py index ecc1585ad84..805ed298bed 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_snapshots_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_snapshots_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +6,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Iterable, Optional, Type, TypeVar, Union, overload +from typing import Any, Callable, IO, Optional, TypeVar, Union, overload import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -27,14 +28,12 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -44,7 +43,7 @@ def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -68,14 +67,14 @@ def build_list_by_resource_group_request(resource_group_name: str, subscription_ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", 
"2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -98,14 +97,14 @@ def build_get_request(resource_group_name: str, resource_name: str, subscription _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -138,7 +137,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -146,7 +145,7 @@ def build_create_or_update_request( _url = kwargs.pop( "template_url", 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -181,7 +180,7 @@ def build_update_tags_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -189,7 +188,7 @@ def build_update_tags_request( _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -224,14 +223,14 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ 
-264,37 +263,36 @@ class SnapshotsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`snapshots` attribute. """ models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list(self, **kwargs: Any) -> Iterable["_models.Snapshot"]: + def list(self, **kwargs: Any) -> ItemPaged["_models.Snapshot"]: """Gets a list of snapshots in the specified subscription. Gets a list of snapshots in the specified subscription. 
:return: An iterator like instance of either Snapshot or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.Snapshot] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -322,7 +320,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -348,14 +346,18 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data) @distributed_trace - def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Snapshot"]: + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> ItemPaged["_models.Snapshot"]: 
"""Lists snapshots in the specified subscription and resource group. Lists snapshots in the specified subscription and resource group. @@ -364,16 +366,16 @@ def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Ite Required. :type resource_group_name: str :return: An iterator like instance of either Snapshot or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.Snapshot] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.Snapshot] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -402,7 +404,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -428,7 +430,11 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response 
@@ -446,10 +452,10 @@ def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _m :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -460,7 +466,7 @@ def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _m _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) _request = build_get_request( @@ -482,7 +488,11 @@ def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _m if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("Snapshot", pipeline_response.http_response) @@ -511,12 +521,12 @@ def create_or_update( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: The snapshot to create or update. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :type parameters: ~azure.mgmt.containerservice.models.Snapshot :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ @@ -545,7 +555,7 @@ def create_or_update( Default value is "application/json". :paramtype content_type: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ @@ -568,12 +578,12 @@ def create_or_update( :type resource_name: str :param parameters: The snapshot to create or update. Is either a Snapshot type or a IO[bytes] type. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.Snapshot or IO[bytes] :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -584,7 +594,7 @@ def create_or_update( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) @@ -618,7 +628,11 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("Snapshot", pipeline_response.http_response) @@ -647,12 +661,12 @@ def update_tags( :param resource_name: The name of the managed cluster resource. Required. :type resource_name: str :param parameters: Parameters supplied to the Update snapshot Tags operation. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject + :type parameters: ~azure.mgmt.containerservice.models.TagsObject :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ @@ -681,7 +695,7 @@ def update_tags( Default value is "application/json". :paramtype content_type: str :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ @@ -704,12 +718,12 @@ def update_tags( :type resource_name: str :param parameters: Parameters supplied to the Update snapshot Tags operation. Is either a TagsObject type or a IO[bytes] type. Required. 
- :type parameters: ~azure.mgmt.containerservice.v2024_07_01.models.TagsObject or IO[bytes] + :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes] :return: Snapshot or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.Snapshot + :rtype: ~azure.mgmt.containerservice.models.Snapshot :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -720,7 +734,7 @@ def update_tags( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) @@ -754,7 +768,11 @@ def update_tags( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("Snapshot", pipeline_response.http_response) @@ -780,7 +798,7 @@ def delete( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -791,7 +809,7 @@ def delete( # pylint: 
disable=inconsistent-return-statements _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) _request = build_delete_request( @@ -813,7 +831,11 @@ def delete( # pylint: disable=inconsistent-return-statements if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) # type: ignore diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_role_bindings_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_trusted_access_role_bindings_operations.py similarity index 91% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_role_bindings_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_trusted_access_role_bindings_operations.py index 70dd5f8d839..4e7e2f450c6 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_role_bindings_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_trusted_access_role_bindings_operations.py @@ -1,4 +1,4 @@ -# pylint: 
disable=too-many-lines,too-many-statements +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,11 +6,12 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, Type, TypeVar, Union, cast, overload +from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -31,14 +32,12 @@ from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -50,14 +49,14 @@ def build_list_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", 
"2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -94,14 +93,14 @@ def build_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings/{trustedAccessRoleBindingName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -146,7 +145,7 @@ def build_create_or_update_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) accept = _headers.pop("Accept", "application/json") @@ -154,7 +153,7 @@ def build_create_or_update_request( _url = kwargs.pop( "template_url", 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings/{trustedAccessRoleBindingName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -201,14 +200,14 @@ def build_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings/{trustedAccessRoleBindingName}", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "resourceGroupName": _SERIALIZER.url( @@ -249,24 +248,23 @@ class TrustedAccessRoleBindingsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`trusted_access_role_bindings` attribute. 
""" models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list( self, resource_group_name: str, resource_name: str, **kwargs: Any - ) -> Iterable["_models.TrustedAccessRoleBinding"]: + ) -> ItemPaged["_models.TrustedAccessRoleBinding"]: """List trusted access role bindings. List trusted access role bindings. 
@@ -279,16 +277,16 @@ def list( :return: An iterator like instance of either TrustedAccessRoleBinding or the result of cls(response) :rtype: - ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.TrustedAccessRoleBindingListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -318,7 +316,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -344,7 +342,11 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response @@ -366,10 +368,10 @@ def get( :param trusted_access_role_binding_name: The name of trusted access role binding. Required. 
:type trusted_access_role_binding_name: str :return: TrustedAccessRoleBinding or the result of cls(response) - :rtype: ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding + :rtype: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -380,7 +382,7 @@ def get( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None) _request = build_get_request( @@ -403,7 +405,11 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response) @@ -420,7 +426,7 @@ def _create_or_update_initial( trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, IO[bytes]], **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -431,7 +437,7 @@ def _create_or_update_initial( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: 
str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) @@ -471,7 +477,10 @@ def _create_or_update_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = response.stream_download(self._client._pipeline, decompress=_decompress) @@ -504,15 +513,14 @@ def begin_create_or_update( :param trusted_access_role_binding_name: The name of trusted access role binding. Required. :type trusted_access_role_binding_name: str :param trusted_access_role_binding: A trusted access role binding. Required. - :type trusted_access_role_binding: - ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding + :type trusted_access_role_binding: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str :return: An instance of LROPoller that returns either TrustedAccessRoleBinding or the result of cls(response) :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -546,7 +554,7 @@ def begin_create_or_update( :return: An instance of LROPoller that returns either TrustedAccessRoleBinding or the result of cls(response) :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] :raises ~azure.core.exceptions.HttpResponseError: """ @@ -572,18 +580,18 @@ def begin_create_or_update( :type trusted_access_role_binding_name: str :param trusted_access_role_binding: A trusted access role binding. Is either a TrustedAccessRoleBinding type or a IO[bytes] type. Required. 
- :type trusted_access_role_binding: - ~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding or IO[bytes] + :type trusted_access_role_binding: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding + or IO[bytes] :return: An instance of LROPoller that returns either TrustedAccessRoleBinding or the result of cls(response) :rtype: - ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRoleBinding] + ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) @@ -631,7 +639,7 @@ def get_long_running_output(pipeline_response): def _delete_initial( self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any ) -> Iterator[bytes]: - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -642,7 +650,7 @@ def _delete_initial( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) 
_request = build_delete_request( @@ -670,7 +678,10 @@ def _delete_initial( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} @@ -706,7 +717,7 @@ def begin_delete( _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_roles_operations.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_trusted_access_roles_operations.py similarity index 78% rename from src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_roles_operations.py rename to src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_trusted_access_roles_operations.py index c90fd5c90c7..b0f3eedfd6f 100644 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_trusted_access_roles_operations.py +++ b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/operations/_trusted_access_roles_operations.py @@ -1,4 +1,3 @@ -# pylint: 
disable=too-many-lines,too-many-statements # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -6,10 +5,11 @@ # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -import sys -from typing import Any, Callable, Dict, Iterable, Optional, Type, TypeVar +from collections.abc import MutableMapping +from typing import Any, Callable, Optional, TypeVar import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -26,14 +26,12 @@ from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models -from ..._serialization import Serializer +from .._configuration import ContainerServiceClientConfiguration +from .._utils.serialization import Deserializer, Serializer -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +List = list _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -43,14 +41,14 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = 
kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles", - ) # pylint: disable=line-too-long + ) path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), "location": _SERIALIZER.url("location", location, "str", min_length=1), @@ -73,22 +71,21 @@ class TrustedAccessRolesOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.mgmt.containerservice.v2024_07_01.ContainerServiceClient`'s + :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s :attr:`trusted_access_roles` attribute. """ models = _models - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list(self, location: str, **kwargs: Any) -> Iterable["_models.TrustedAccessRole"]: + def list(self, location: str, **kwargs: Any) -> ItemPaged["_models.TrustedAccessRole"]: """List supported trusted access roles. List supported trusted access roles. 
@@ -96,17 +93,16 @@ def list(self, location: str, **kwargs: Any) -> Iterable["_models.TrustedAccessR :param location: The name of the Azure region. Required. :type location: str :return: An iterator like instance of either TrustedAccessRole or the result of cls(response) - :rtype: - ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2024_07_01.models.TrustedAccessRole] + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.TrustedAccessRole] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2024-07-01")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -135,7 +131,7 @@ def prepare_request(next_link=None): for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) - _next_request_params["api-version"] = self._api_version + _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) @@ -161,7 +157,11 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, error_format=ARMErrorFormat) + error = self._deserialize.failsafe_deserialize( + _models.ErrorResponse, + pipeline_response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response diff --git 
a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/__init__.py deleted file mode 100644 index 4f004a298ed..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._container_service_client import ContainerServiceClient -from ._version import VERSION - -__version__ = VERSION - -try: - from ._patch import __all__ as _patch_all - from ._patch import * # pylint: disable=unused-wildcard-import -except ImportError: - _patch_all = [] -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "ContainerServiceClient", -] -__all__.extend([p for p in _patch_all if p not in __all__]) - -_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_configuration.py deleted file mode 100644 index 8a2cfba3ad4..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_configuration.py +++ /dev/null @@ -1,65 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline import policies -from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy - -from ._version import VERSION - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials import TokenCredential - - -class ContainerServiceClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long - """Configuration for ContainerServiceClient. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param credential: Credential needed for the client to connect to Azure. Required. - :type credential: ~azure.core.credentials.TokenCredential - :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. - :type subscription_id: str - :keyword api_version: Api Version. Default value is "2024-07-01". Note that overriding this - default value may result in unsupported behavior. 
- :paramtype api_version: str - """ - - def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-07-01") - - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - - self.credential = credential - self.subscription_id = subscription_id - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) - kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = ARMChallengeAuthenticationPolicy( - self.credential, *self.credential_scopes, **kwargs - ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_container_service_client.py 
b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_container_service_client.py deleted file mode 100644 index 37b41101ae1..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/_container_service_client.py +++ /dev/null @@ -1,179 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any, TYPE_CHECKING -from typing_extensions import Self - -from azure.core.pipeline import policies -from azure.core.rest import HttpRequest, HttpResponse -from azure.mgmt.core import ARMPipelineClient -from azure.mgmt.core.policies import ARMAutoResourceProviderRegistrationPolicy - -from . import models as _models -from .._serialization import Deserializer, Serializer -from ._configuration import ContainerServiceClientConfiguration -from .operations import ( - AgentPoolsOperations, - MachinesOperations, - MaintenanceConfigurationsOperations, - ManagedClustersOperations, - Operations, - PrivateEndpointConnectionsOperations, - PrivateLinkResourcesOperations, - ResolvePrivateLinkServiceIdOperations, - SnapshotsOperations, - TrustedAccessRoleBindingsOperations, - TrustedAccessRolesOperations, -) - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials import TokenCredential - - -class ContainerServiceClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes - """The Container Service Client. 
- - :ivar operations: Operations operations - :vartype operations: azure.mgmt.containerservice.v2024_07_01.operations.Operations - :ivar managed_clusters: ManagedClustersOperations operations - :vartype managed_clusters: - azure.mgmt.containerservice.v2024_07_01.operations.ManagedClustersOperations - :ivar maintenance_configurations: MaintenanceConfigurationsOperations operations - :vartype maintenance_configurations: - azure.mgmt.containerservice.v2024_07_01.operations.MaintenanceConfigurationsOperations - :ivar agent_pools: AgentPoolsOperations operations - :vartype agent_pools: azure.mgmt.containerservice.v2024_07_01.operations.AgentPoolsOperations - :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations - :vartype private_endpoint_connections: - azure.mgmt.containerservice.v2024_07_01.operations.PrivateEndpointConnectionsOperations - :ivar private_link_resources: PrivateLinkResourcesOperations operations - :vartype private_link_resources: - azure.mgmt.containerservice.v2024_07_01.operations.PrivateLinkResourcesOperations - :ivar resolve_private_link_service_id: ResolvePrivateLinkServiceIdOperations operations - :vartype resolve_private_link_service_id: - azure.mgmt.containerservice.v2024_07_01.operations.ResolvePrivateLinkServiceIdOperations - :ivar snapshots: SnapshotsOperations operations - :vartype snapshots: azure.mgmt.containerservice.v2024_07_01.operations.SnapshotsOperations - :ivar trusted_access_role_bindings: TrustedAccessRoleBindingsOperations operations - :vartype trusted_access_role_bindings: - azure.mgmt.containerservice.v2024_07_01.operations.TrustedAccessRoleBindingsOperations - :ivar trusted_access_roles: TrustedAccessRolesOperations operations - :vartype trusted_access_roles: - azure.mgmt.containerservice.v2024_07_01.operations.TrustedAccessRolesOperations - :ivar machines: MachinesOperations operations - :vartype machines: azure.mgmt.containerservice.v2024_07_01.operations.MachinesOperations - :param credential: 
Credential needed for the client to connect to Azure. Required. - :type credential: ~azure.core.credentials.TokenCredential - :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. - :type subscription_id: str - :param base_url: Service URL. Default value is "https://management.azure.com". - :type base_url: str - :keyword api_version: Api Version. Default value is "2024-07-01". Note that overriding this - default value may result in unsupported behavior. - :paramtype api_version: str - :keyword int polling_interval: Default waiting time between two polls for LRO operations if no - Retry-After header is present. - """ - - def __init__( - self, - credential: "TokenCredential", - subscription_id: str, - base_url: str = "https://management.azure.com", - **kwargs: Any - ) -> None: - self._config = ContainerServiceClientConfiguration( - credential=credential, subscription_id=subscription_id, **kwargs - ) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - ARMAutoResourceProviderRegistrationPolicy(), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, policies=_policies, **kwargs) - - client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.operations = Operations(self._client, self._config, 
self._serialize, self._deserialize, "2024-07-01") - self.managed_clusters = ManagedClustersOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.maintenance_configurations = MaintenanceConfigurationsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.agent_pools = AgentPoolsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.private_endpoint_connections = PrivateEndpointConnectionsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.private_link_resources = PrivateLinkResourcesOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.resolve_private_link_service_id = ResolvePrivateLinkServiceIdOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.snapshots = SnapshotsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.trusted_access_role_bindings = TrustedAccessRoleBindingsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.trusted_access_roles = TrustedAccessRolesOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.machines = MachinesOperations(self._client, self._config, self._serialize, self._deserialize, "2024-07-01") - - def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. 
- :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.rest.HttpResponse - """ - - request_copy = deepcopy(request) - request_copy.url = self._client.format_url(request_copy.url) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - def close(self) -> None: - self._client.close() - - def __enter__(self) -> Self: - self._client.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client.__exit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/__init__.py deleted file mode 100644 index d14e96ddb36..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from ._container_service_client import ContainerServiceClient - -try: - from ._patch import __all__ as _patch_all - from ._patch import * # pylint: disable=unused-wildcard-import -except ImportError: - _patch_all = [] -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "ContainerServiceClient", -] -__all__.extend([p for p in _patch_all if p not in __all__]) - -_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_configuration.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_configuration.py deleted file mode 100644 index b68fcb866fc..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_configuration.py +++ /dev/null @@ -1,65 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline import policies -from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy - -from .._version import VERSION - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials_async import AsyncTokenCredential - - -class ContainerServiceClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long - """Configuration for ContainerServiceClient. 
- - Note that all parameters used to create this instance are saved as instance - attributes. - - :param credential: Credential needed for the client to connect to Azure. Required. - :type credential: ~azure.core.credentials_async.AsyncTokenCredential - :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. - :type subscription_id: str - :keyword api_version: Api Version. Default value is "2024-07-01". Note that overriding this - default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-07-01") - - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - - self.credential = credential - self.subscription_id = subscription_id - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) - kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or 
policies.AsyncRedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = AsyncARMChallengeAuthenticationPolicy( - self.credential, *self.credential_scopes, **kwargs - ) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_container_service_client.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_container_service_client.py deleted file mode 100644 index 5d4c2c0b25c..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/aio/_container_service_client.py +++ /dev/null @@ -1,182 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any, Awaitable, TYPE_CHECKING -from typing_extensions import Self - -from azure.core.pipeline import policies -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.mgmt.core import AsyncARMPipelineClient -from azure.mgmt.core.policies import AsyncARMAutoResourceProviderRegistrationPolicy - -from .. 
import models as _models -from ..._serialization import Deserializer, Serializer -from ._configuration import ContainerServiceClientConfiguration -from .operations import ( - AgentPoolsOperations, - MachinesOperations, - MaintenanceConfigurationsOperations, - ManagedClustersOperations, - Operations, - PrivateEndpointConnectionsOperations, - PrivateLinkResourcesOperations, - ResolvePrivateLinkServiceIdOperations, - SnapshotsOperations, - TrustedAccessRoleBindingsOperations, - TrustedAccessRolesOperations, -) - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials_async import AsyncTokenCredential - - -class ContainerServiceClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes - """The Container Service Client. - - :ivar operations: Operations operations - :vartype operations: azure.mgmt.containerservice.v2024_07_01.aio.operations.Operations - :ivar managed_clusters: ManagedClustersOperations operations - :vartype managed_clusters: - azure.mgmt.containerservice.v2024_07_01.aio.operations.ManagedClustersOperations - :ivar maintenance_configurations: MaintenanceConfigurationsOperations operations - :vartype maintenance_configurations: - azure.mgmt.containerservice.v2024_07_01.aio.operations.MaintenanceConfigurationsOperations - :ivar agent_pools: AgentPoolsOperations operations - :vartype agent_pools: - azure.mgmt.containerservice.v2024_07_01.aio.operations.AgentPoolsOperations - :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations - :vartype private_endpoint_connections: - azure.mgmt.containerservice.v2024_07_01.aio.operations.PrivateEndpointConnectionsOperations - :ivar private_link_resources: PrivateLinkResourcesOperations operations - :vartype private_link_resources: - azure.mgmt.containerservice.v2024_07_01.aio.operations.PrivateLinkResourcesOperations - :ivar resolve_private_link_service_id: ResolvePrivateLinkServiceIdOperations operations - :vartype 
resolve_private_link_service_id: - azure.mgmt.containerservice.v2024_07_01.aio.operations.ResolvePrivateLinkServiceIdOperations - :ivar snapshots: SnapshotsOperations operations - :vartype snapshots: azure.mgmt.containerservice.v2024_07_01.aio.operations.SnapshotsOperations - :ivar trusted_access_role_bindings: TrustedAccessRoleBindingsOperations operations - :vartype trusted_access_role_bindings: - azure.mgmt.containerservice.v2024_07_01.aio.operations.TrustedAccessRoleBindingsOperations - :ivar trusted_access_roles: TrustedAccessRolesOperations operations - :vartype trusted_access_roles: - azure.mgmt.containerservice.v2024_07_01.aio.operations.TrustedAccessRolesOperations - :ivar machines: MachinesOperations operations - :vartype machines: azure.mgmt.containerservice.v2024_07_01.aio.operations.MachinesOperations - :param credential: Credential needed for the client to connect to Azure. Required. - :type credential: ~azure.core.credentials_async.AsyncTokenCredential - :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. - :type subscription_id: str - :param base_url: Service URL. Default value is "https://management.azure.com". - :type base_url: str - :keyword api_version: Api Version. Default value is "2024-07-01". Note that overriding this - default value may result in unsupported behavior. - :paramtype api_version: str - :keyword int polling_interval: Default waiting time between two polls for LRO operations if no - Retry-After header is present. 
- """ - - def __init__( - self, - credential: "AsyncTokenCredential", - subscription_id: str, - base_url: str = "https://management.azure.com", - **kwargs: Any - ) -> None: - self._config = ContainerServiceClientConfiguration( - credential=credential, subscription_id=subscription_id, **kwargs - ) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - AsyncARMAutoResourceProviderRegistrationPolicy(), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, policies=_policies, **kwargs) - - client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.operations = Operations(self._client, self._config, self._serialize, self._deserialize, "2024-07-01") - self.managed_clusters = ManagedClustersOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.maintenance_configurations = MaintenanceConfigurationsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.agent_pools = AgentPoolsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.private_endpoint_connections = PrivateEndpointConnectionsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - 
self.private_link_resources = PrivateLinkResourcesOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.resolve_private_link_service_id = ResolvePrivateLinkServiceIdOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.snapshots = SnapshotsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.trusted_access_role_bindings = TrustedAccessRoleBindingsOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.trusted_access_roles = TrustedAccessRolesOperations( - self._client, self._config, self._serialize, self._deserialize, "2024-07-01" - ) - self.machines = MachinesOperations(self._client, self._config, self._serialize, self._deserialize, "2024-07-01") - - def _send_request( - self, request: HttpRequest, *, stream: bool = False, **kwargs: Any - ) -> Awaitable[AsyncHttpResponse]: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = await client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.rest.AsyncHttpResponse - """ - - request_copy = deepcopy(request) - request_copy.url = self._client.format_url(request_copy.url) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> Self: - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client.__aexit__(*exc_details) diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/__init__.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/__init__.py deleted file mode 100644 index 8b172e7eb5c..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/models/__init__.py +++ /dev/null @@ -1,407 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from ._models_py3 import AbsoluteMonthlySchedule -from ._models_py3 import AgentPool -from ._models_py3 import AgentPoolAvailableVersions -from ._models_py3 import AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem -from ._models_py3 import AgentPoolDeleteMachinesParameter -from ._models_py3 import AgentPoolListResult -from ._models_py3 import AgentPoolNetworkProfile -from ._models_py3 import AgentPoolSecurityProfile -from ._models_py3 import AgentPoolUpgradeProfile -from ._models_py3 import AgentPoolUpgradeProfilePropertiesUpgradesItem -from ._models_py3 import AgentPoolUpgradeSettings -from ._models_py3 import AgentPoolWindowsProfile -from ._models_py3 import AzureKeyVaultKms -from ._models_py3 import CloudErrorBody -from ._models_py3 import ClusterUpgradeSettings -from ._models_py3 import CompatibleVersions -from ._models_py3 import ContainerServiceLinuxProfile -from ._models_py3 import ContainerServiceNetworkProfile -from ._models_py3 import ContainerServiceSshConfiguration -from ._models_py3 import ContainerServiceSshPublicKey -from ._models_py3 import CreationData -from ._models_py3 import CredentialResult -from ._models_py3 import CredentialResults -from ._models_py3 import DailySchedule -from ._models_py3 import DateSpan -from ._models_py3 import DelegatedResource -from ._models_py3 import EndpointDependency -from ._models_py3 import EndpointDetail -from ._models_py3 import ErrorAdditionalInfo -from ._models_py3 import ErrorDetail -from ._models_py3 import ErrorResponse -from ._models_py3 import ExtendedLocation -from ._models_py3 import IPTag -from ._models_py3 import IstioCertificateAuthority -from ._models_py3 import IstioComponents -from ._models_py3 import IstioEgressGateway -from ._models_py3 import IstioIngressGateway -from ._models_py3 import IstioPluginCertificateAuthority -from ._models_py3 import IstioServiceMesh -from ._models_py3 import KubeletConfig -from 
._models_py3 import KubernetesPatchVersion -from ._models_py3 import KubernetesVersion -from ._models_py3 import KubernetesVersionCapabilities -from ._models_py3 import KubernetesVersionListResult -from ._models_py3 import LinuxOSConfig -from ._models_py3 import Machine -from ._models_py3 import MachineIpAddress -from ._models_py3 import MachineListResult -from ._models_py3 import MachineNetworkProperties -from ._models_py3 import MachineProperties -from ._models_py3 import MaintenanceConfiguration -from ._models_py3 import MaintenanceConfigurationListResult -from ._models_py3 import MaintenanceWindow -from ._models_py3 import ManagedCluster -from ._models_py3 import ManagedClusterAADProfile -from ._models_py3 import ManagedClusterAPIServerAccessProfile -from ._models_py3 import ManagedClusterAccessProfile -from ._models_py3 import ManagedClusterAddonProfile -from ._models_py3 import ManagedClusterAddonProfileIdentity -from ._models_py3 import ManagedClusterAgentPoolProfile -from ._models_py3 import ManagedClusterAgentPoolProfileProperties -from ._models_py3 import ManagedClusterAutoUpgradeProfile -from ._models_py3 import ManagedClusterAzureMonitorProfile -from ._models_py3 import ManagedClusterAzureMonitorProfileKubeStateMetrics -from ._models_py3 import ManagedClusterAzureMonitorProfileMetrics -from ._models_py3 import ManagedClusterCostAnalysis -from ._models_py3 import ManagedClusterHTTPProxyConfig -from ._models_py3 import ManagedClusterIdentity -from ._models_py3 import ManagedClusterIngressProfile -from ._models_py3 import ManagedClusterIngressProfileWebAppRouting -from ._models_py3 import ManagedClusterListResult -from ._models_py3 import ManagedClusterLoadBalancerProfile -from ._models_py3 import ManagedClusterLoadBalancerProfileManagedOutboundIPs -from ._models_py3 import ManagedClusterLoadBalancerProfileOutboundIPPrefixes -from ._models_py3 import ManagedClusterLoadBalancerProfileOutboundIPs -from ._models_py3 import 
ManagedClusterManagedOutboundIPProfile -from ._models_py3 import ManagedClusterMetricsProfile -from ._models_py3 import ManagedClusterNATGatewayProfile -from ._models_py3 import ManagedClusterOIDCIssuerProfile -from ._models_py3 import ManagedClusterPodIdentity -from ._models_py3 import ManagedClusterPodIdentityException -from ._models_py3 import ManagedClusterPodIdentityProfile -from ._models_py3 import ManagedClusterPodIdentityProvisioningError -from ._models_py3 import ManagedClusterPodIdentityProvisioningErrorBody -from ._models_py3 import ManagedClusterPodIdentityProvisioningInfo -from ._models_py3 import ManagedClusterPoolUpgradeProfile -from ._models_py3 import ManagedClusterPoolUpgradeProfileUpgradesItem -from ._models_py3 import ManagedClusterPropertiesAutoScalerProfile -from ._models_py3 import ManagedClusterSKU -from ._models_py3 import ManagedClusterSecurityProfile -from ._models_py3 import ManagedClusterSecurityProfileDefender -from ._models_py3 import ManagedClusterSecurityProfileDefenderSecurityMonitoring -from ._models_py3 import ManagedClusterSecurityProfileImageCleaner -from ._models_py3 import ManagedClusterSecurityProfileWorkloadIdentity -from ._models_py3 import ManagedClusterServicePrincipalProfile -from ._models_py3 import ManagedClusterStorageProfile -from ._models_py3 import ManagedClusterStorageProfileBlobCSIDriver -from ._models_py3 import ManagedClusterStorageProfileDiskCSIDriver -from ._models_py3 import ManagedClusterStorageProfileFileCSIDriver -from ._models_py3 import ManagedClusterStorageProfileSnapshotController -from ._models_py3 import ManagedClusterUpgradeProfile -from ._models_py3 import ManagedClusterWindowsProfile -from ._models_py3 import ManagedClusterWorkloadAutoScalerProfile -from ._models_py3 import ManagedClusterWorkloadAutoScalerProfileKeda -from ._models_py3 import ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler -from ._models_py3 import ManagedServiceIdentityUserAssignedIdentitiesValue -from ._models_py3 
import MeshRevision -from ._models_py3 import MeshRevisionProfile -from ._models_py3 import MeshRevisionProfileList -from ._models_py3 import MeshRevisionProfileProperties -from ._models_py3 import MeshUpgradeProfile -from ._models_py3 import MeshUpgradeProfileList -from ._models_py3 import MeshUpgradeProfileProperties -from ._models_py3 import OperationListResult -from ._models_py3 import OperationValue -from ._models_py3 import OutboundEnvironmentEndpoint -from ._models_py3 import OutboundEnvironmentEndpointCollection -from ._models_py3 import PortRange -from ._models_py3 import PowerState -from ._models_py3 import PrivateEndpoint -from ._models_py3 import PrivateEndpointConnection -from ._models_py3 import PrivateEndpointConnectionListResult -from ._models_py3 import PrivateLinkResource -from ._models_py3 import PrivateLinkResourcesListResult -from ._models_py3 import PrivateLinkServiceConnectionState -from ._models_py3 import ProxyResource -from ._models_py3 import RelativeMonthlySchedule -from ._models_py3 import Resource -from ._models_py3 import ResourceReference -from ._models_py3 import RunCommandRequest -from ._models_py3 import RunCommandResult -from ._models_py3 import Schedule -from ._models_py3 import ServiceMeshProfile -from ._models_py3 import Snapshot -from ._models_py3 import SnapshotListResult -from ._models_py3 import SubResource -from ._models_py3 import SysctlConfig -from ._models_py3 import SystemData -from ._models_py3 import TagsObject -from ._models_py3 import TimeInWeek -from ._models_py3 import TimeSpan -from ._models_py3 import TrackedResource -from ._models_py3 import TrustedAccessRole -from ._models_py3 import TrustedAccessRoleBinding -from ._models_py3 import TrustedAccessRoleBindingListResult -from ._models_py3 import TrustedAccessRoleListResult -from ._models_py3 import TrustedAccessRoleRule -from ._models_py3 import UpgradeOverrideSettings -from ._models_py3 import UserAssignedIdentity -from ._models_py3 import WeeklySchedule 
-from ._models_py3 import WindowsGmsaProfile - -from ._container_service_client_enums import AgentPoolMode -from ._container_service_client_enums import AgentPoolType -from ._container_service_client_enums import BackendPoolType -from ._container_service_client_enums import Code -from ._container_service_client_enums import ConnectionStatus -from ._container_service_client_enums import CreatedByType -from ._container_service_client_enums import Expander -from ._container_service_client_enums import ExtendedLocationTypes -from ._container_service_client_enums import Format -from ._container_service_client_enums import GPUInstanceProfile -from ._container_service_client_enums import IpFamily -from ._container_service_client_enums import IstioIngressGatewayMode -from ._container_service_client_enums import KeyVaultNetworkAccessTypes -from ._container_service_client_enums import KubeletDiskType -from ._container_service_client_enums import KubernetesSupportPlan -from ._container_service_client_enums import LicenseType -from ._container_service_client_enums import LoadBalancerSku -from ._container_service_client_enums import ManagedClusterPodIdentityProvisioningState -from ._container_service_client_enums import ManagedClusterSKUName -from ._container_service_client_enums import ManagedClusterSKUTier -from ._container_service_client_enums import NetworkDataplane -from ._container_service_client_enums import NetworkMode -from ._container_service_client_enums import NetworkPlugin -from ._container_service_client_enums import NetworkPluginMode -from ._container_service_client_enums import NetworkPolicy -from ._container_service_client_enums import NodeOSUpgradeChannel -from ._container_service_client_enums import OSDiskType -from ._container_service_client_enums import OSSKU -from ._container_service_client_enums import OSType -from ._container_service_client_enums import OutboundType -from ._container_service_client_enums import PrivateEndpointConnectionProvisioningState 
-from ._container_service_client_enums import Protocol -from ._container_service_client_enums import PublicNetworkAccess -from ._container_service_client_enums import ResourceIdentityType -from ._container_service_client_enums import ScaleDownMode -from ._container_service_client_enums import ScaleSetEvictionPolicy -from ._container_service_client_enums import ScaleSetPriority -from ._container_service_client_enums import ServiceMeshMode -from ._container_service_client_enums import SnapshotType -from ._container_service_client_enums import TrustedAccessRoleBindingProvisioningState -from ._container_service_client_enums import Type -from ._container_service_client_enums import UpgradeChannel -from ._container_service_client_enums import WeekDay -from ._container_service_client_enums import WorkloadRuntime -from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "AbsoluteMonthlySchedule", - "AgentPool", - "AgentPoolAvailableVersions", - "AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem", - "AgentPoolDeleteMachinesParameter", - "AgentPoolListResult", - "AgentPoolNetworkProfile", - "AgentPoolSecurityProfile", - "AgentPoolUpgradeProfile", - "AgentPoolUpgradeProfilePropertiesUpgradesItem", - "AgentPoolUpgradeSettings", - "AgentPoolWindowsProfile", - "AzureKeyVaultKms", - "CloudErrorBody", - "ClusterUpgradeSettings", - "CompatibleVersions", - "ContainerServiceLinuxProfile", - "ContainerServiceNetworkProfile", - "ContainerServiceSshConfiguration", - "ContainerServiceSshPublicKey", - "CreationData", - "CredentialResult", - "CredentialResults", - "DailySchedule", - "DateSpan", - "DelegatedResource", - "EndpointDependency", - "EndpointDetail", - "ErrorAdditionalInfo", - "ErrorDetail", - "ErrorResponse", - "ExtendedLocation", - "IPTag", - "IstioCertificateAuthority", - "IstioComponents", - "IstioEgressGateway", - "IstioIngressGateway", - 
"IstioPluginCertificateAuthority", - "IstioServiceMesh", - "KubeletConfig", - "KubernetesPatchVersion", - "KubernetesVersion", - "KubernetesVersionCapabilities", - "KubernetesVersionListResult", - "LinuxOSConfig", - "Machine", - "MachineIpAddress", - "MachineListResult", - "MachineNetworkProperties", - "MachineProperties", - "MaintenanceConfiguration", - "MaintenanceConfigurationListResult", - "MaintenanceWindow", - "ManagedCluster", - "ManagedClusterAADProfile", - "ManagedClusterAPIServerAccessProfile", - "ManagedClusterAccessProfile", - "ManagedClusterAddonProfile", - "ManagedClusterAddonProfileIdentity", - "ManagedClusterAgentPoolProfile", - "ManagedClusterAgentPoolProfileProperties", - "ManagedClusterAutoUpgradeProfile", - "ManagedClusterAzureMonitorProfile", - "ManagedClusterAzureMonitorProfileKubeStateMetrics", - "ManagedClusterAzureMonitorProfileMetrics", - "ManagedClusterCostAnalysis", - "ManagedClusterHTTPProxyConfig", - "ManagedClusterIdentity", - "ManagedClusterIngressProfile", - "ManagedClusterIngressProfileWebAppRouting", - "ManagedClusterListResult", - "ManagedClusterLoadBalancerProfile", - "ManagedClusterLoadBalancerProfileManagedOutboundIPs", - "ManagedClusterLoadBalancerProfileOutboundIPPrefixes", - "ManagedClusterLoadBalancerProfileOutboundIPs", - "ManagedClusterManagedOutboundIPProfile", - "ManagedClusterMetricsProfile", - "ManagedClusterNATGatewayProfile", - "ManagedClusterOIDCIssuerProfile", - "ManagedClusterPodIdentity", - "ManagedClusterPodIdentityException", - "ManagedClusterPodIdentityProfile", - "ManagedClusterPodIdentityProvisioningError", - "ManagedClusterPodIdentityProvisioningErrorBody", - "ManagedClusterPodIdentityProvisioningInfo", - "ManagedClusterPoolUpgradeProfile", - "ManagedClusterPoolUpgradeProfileUpgradesItem", - "ManagedClusterPropertiesAutoScalerProfile", - "ManagedClusterSKU", - "ManagedClusterSecurityProfile", - "ManagedClusterSecurityProfileDefender", - "ManagedClusterSecurityProfileDefenderSecurityMonitoring", - 
"ManagedClusterSecurityProfileImageCleaner", - "ManagedClusterSecurityProfileWorkloadIdentity", - "ManagedClusterServicePrincipalProfile", - "ManagedClusterStorageProfile", - "ManagedClusterStorageProfileBlobCSIDriver", - "ManagedClusterStorageProfileDiskCSIDriver", - "ManagedClusterStorageProfileFileCSIDriver", - "ManagedClusterStorageProfileSnapshotController", - "ManagedClusterUpgradeProfile", - "ManagedClusterWindowsProfile", - "ManagedClusterWorkloadAutoScalerProfile", - "ManagedClusterWorkloadAutoScalerProfileKeda", - "ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler", - "ManagedServiceIdentityUserAssignedIdentitiesValue", - "MeshRevision", - "MeshRevisionProfile", - "MeshRevisionProfileList", - "MeshRevisionProfileProperties", - "MeshUpgradeProfile", - "MeshUpgradeProfileList", - "MeshUpgradeProfileProperties", - "OperationListResult", - "OperationValue", - "OutboundEnvironmentEndpoint", - "OutboundEnvironmentEndpointCollection", - "PortRange", - "PowerState", - "PrivateEndpoint", - "PrivateEndpointConnection", - "PrivateEndpointConnectionListResult", - "PrivateLinkResource", - "PrivateLinkResourcesListResult", - "PrivateLinkServiceConnectionState", - "ProxyResource", - "RelativeMonthlySchedule", - "Resource", - "ResourceReference", - "RunCommandRequest", - "RunCommandResult", - "Schedule", - "ServiceMeshProfile", - "Snapshot", - "SnapshotListResult", - "SubResource", - "SysctlConfig", - "SystemData", - "TagsObject", - "TimeInWeek", - "TimeSpan", - "TrackedResource", - "TrustedAccessRole", - "TrustedAccessRoleBinding", - "TrustedAccessRoleBindingListResult", - "TrustedAccessRoleListResult", - "TrustedAccessRoleRule", - "UpgradeOverrideSettings", - "UserAssignedIdentity", - "WeeklySchedule", - "WindowsGmsaProfile", - "AgentPoolMode", - "AgentPoolType", - "BackendPoolType", - "Code", - "ConnectionStatus", - "CreatedByType", - "Expander", - "ExtendedLocationTypes", - "Format", - "GPUInstanceProfile", - "IpFamily", - "IstioIngressGatewayMode", - 
"KeyVaultNetworkAccessTypes", - "KubeletDiskType", - "KubernetesSupportPlan", - "LicenseType", - "LoadBalancerSku", - "ManagedClusterPodIdentityProvisioningState", - "ManagedClusterSKUName", - "ManagedClusterSKUTier", - "NetworkDataplane", - "NetworkMode", - "NetworkPlugin", - "NetworkPluginMode", - "NetworkPolicy", - "NodeOSUpgradeChannel", - "OSDiskType", - "OSSKU", - "OSType", - "OutboundType", - "PrivateEndpointConnectionProvisioningState", - "Protocol", - "PublicNetworkAccess", - "ResourceIdentityType", - "ScaleDownMode", - "ScaleSetEvictionPolicy", - "ScaleSetPriority", - "ServiceMeshMode", - "SnapshotType", - "TrustedAccessRoleBindingProvisioningState", - "Type", - "UpgradeChannel", - "WeekDay", - "WorkloadRuntime", -] -__all__.extend([p for p in _patch_all if p not in __all__]) -_patch_sdk() diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_patch.py b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_patch.py deleted file mode 100644 index f7dd3251033..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/operations/_patch.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -from typing import List - -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. 
- - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/py.typed b/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/py.typed deleted file mode 100644 index e5aff4f83af..00000000000 --- a/src/dataprotection/azext_dataprotection/vendored_sdks/azure_mgmt_containerservice/v2024_07_01/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561. \ No newline at end of file From 4ea9e98e1f5c1001d68c1631693f18eba379d864 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 16 Feb 2026 12:23:37 +0530 Subject: [PATCH 11/24] [AKS Preview] Revert backup changes - moved to separate branch aksbackup-aks-preview --- src/aks-preview/azext_aks_preview/_params.py | 23 -------------- src/aks-preview/azext_aks_preview/custom.py | 4 --- .../managed_cluster_decorator.py | 31 ------------------- 3 files changed, 58 deletions(-) diff --git a/src/aks-preview/azext_aks_preview/_params.py b/src/aks-preview/azext_aks_preview/_params.py index 9890ce47080..dd7d9524980 100644 --- a/src/aks-preview/azext_aks_preview/_params.py +++ b/src/aks-preview/azext_aks_preview/_params.py @@ -23,21 +23,6 @@ validate_nat_gateway_idle_timeout, validate_nat_gateway_managed_outbound_ip_count, ) - -# Import backup strategy constants from dataprotection extension -from azure.cli.core.extension.operations import add_extension_to_path -add_extension_to_path("dataprotection") -from azext_dataprotection.manual._consts import ( - CONST_AKS_BACKUP_STRATEGIES, - CONST_BACKUP_STRATEGY_WEEK, - CONST_BACKUP_STRATEGY_MONTH, - CONST_BACKUP_STRATEGY_IMMUTABLE, - CONST_BACKUP_STRATEGY_DISASTER_RECOVERY, - CONST_BACKUP_STRATEGY_CUSTOM, -) - -backup_presets = CONST_AKS_BACKUP_STRATEGIES - from azure.cli.core.commands.parameters 
import ( edge_zone_type, file_type, @@ -178,7 +163,6 @@ CONST_UPGRADE_STRATEGY_ROLLING, CONST_UPGRADE_STRATEGY_BLUE_GREEN ) -from azure.cli.core.commands.validators import validate_file_or_dict from azext_aks_preview._validators import ( validate_acr, @@ -1756,13 +1740,6 @@ def load_arguments(self, _): 'by that action.' ) ) - c.argument("enable_backup", help="Enable backup for the cluster", is_preview=True, action="store_true") - c.argument("backup_strategy", arg_type=get_enum_type(backup_presets), help="Backup strategy for the cluster. Defaults to Week.", is_preview=True) - c.argument("backup_configuration_file", type=validate_file_or_dict, - options_list=['--backup-configuration-file', '-f'], - help="Path to backup configuration file (JSON) or inline JSON string.", is_preview=True) - # In update scenario, use emtpy str as default. - c.argument('ssh_access', arg_type=get_enum_type(ssh_accesses), is_preview=True) c.argument('enable_static_egress_gateway', is_preview=True, action='store_true') c.argument('disable_static_egress_gateway', is_preview=True, action='store_true') c.argument("enable_imds_restriction", action="store_true", is_preview=True) diff --git a/src/aks-preview/azext_aks_preview/custom.py b/src/aks-preview/azext_aks_preview/custom.py index efd8316d92b..83a3d49e03e 100644 --- a/src/aks-preview/azext_aks_preview/custom.py +++ b/src/aks-preview/azext_aks_preview/custom.py @@ -1394,10 +1394,6 @@ def aks_update( # IMDS restriction enable_imds_restriction=False, disable_imds_restriction=False, - # Backup - enable_backup=False, - backup_strategy=None, - backup_configuration_parameters=None, migrate_vmas_to_vms=False, enable_upstream_kubescheduler_user_configuration=False, disable_upstream_kubescheduler_user_configuration=False, diff --git a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py index e68b57657b5..fae5f33faf5 100644 --- 
a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py +++ b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py @@ -6,7 +6,6 @@ # pylint: disable=too-many-lines import copy import datetime -import json import os from types import SimpleNamespace from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union @@ -7374,39 +7373,9 @@ def update_mc_profile_preview(self) -> ManagedCluster: mc = self.update_upstream_kubescheduler_user_configuration(mc) # update ManagedSystem pools, must at end mc = self.update_managed_system_pools(mc) - # set up backup - mc = self.set_up_backup(mc) return mc - def set_up_backup(self, mc: ManagedCluster) -> ManagedCluster: - - enable_backup = self.context.raw_param.get("enable_backup") - if enable_backup: - # Validate that dataprotection extension is installed - try: - from azure.cli.core.extension.operations import add_extension_to_path - add_extension_to_path("dataprotection") - from azext_dataprotection.manual.aks.aks_helper import dataprotection_enable_backup_helper - except (ImportError, ModuleNotFoundError): - raise CLIError( - "The 'dataprotection' extension is required for AKS backup functionality.\n" - "Please install it using: az extension add --name dataprotection" - ) - - backup_strategy = self.context.raw_param.get("backup_strategy") - backup_configuration_file = self.context.raw_param.get("backup_configuration_file") - - # Build the cluster resource ID - cluster_resource_id = ( - f"/subscriptions/{self.context.get_subscription_id()}" - f"/resourceGroups/{self.context.get_resource_group_name()}" - f"/providers/Microsoft.ContainerService/managedClusters/{self.context.get_name()}" - ) - - dataprotection_enable_backup_helper(self.cmd, str(cluster_resource_id), backup_strategy, backup_configuration_file) - return mc - def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool: """Helper function to check if postprocessing is required after sending a PUT request to create the cluster. 
From d65285ea398688687ce7a291deda7e91391675c3 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 16 Feb 2026 12:35:24 +0530 Subject: [PATCH 12/24] linting Signed-off-by: Anshul Ahuja --- src/dataprotection/HISTORY.rst | 5 + .../azext_dataprotection/manual/_help.py | 15 + .../azext_dataprotection/manual/_params.py | 8 +- .../manual/aks/aks_helper.py | 393 +++++++++--------- .../azext_dataprotection/manual/commands.py | 3 +- .../azext_dataprotection/manual/custom.py | 31 +- .../azext_dataprotection/manual/enums.py | 2 +- src/dataprotection/setup.py | 2 +- 8 files changed, 240 insertions(+), 219 deletions(-) diff --git a/src/dataprotection/HISTORY.rst b/src/dataprotection/HISTORY.rst index 81a31859a51..3cd1325fac7 100644 --- a/src/dataprotection/HISTORY.rst +++ b/src/dataprotection/HISTORY.rst @@ -2,6 +2,11 @@ Release History =============== +1.9.0 ++++++ +* `az dataprotection enable-backup trigger`: New command to enable backup for AKS clusters with a single command. Supports preset backup strategies (Week, Month, Immutable, DisasterRecovery) and Custom strategy with user-provided configuration. +* Added vendored SDKs: `azure-mgmt-containerservice` (40.2.0), `azure-mgmt-kubernetesconfiguration` (3.1.0), `azure-mgmt-resourcegraph` (8.0.0). + 1.8.0 +++++ * `az dataprotection backup-instance update`: New parameter: `--backup-configuration` to update AKS datasource parameters. 
diff --git a/src/dataprotection/azext_dataprotection/manual/_help.py b/src/dataprotection/azext_dataprotection/manual/_help.py index 8774ac429f5..6dd7c8d0167 100644 --- a/src/dataprotection/azext_dataprotection/manual/_help.py +++ b/src/dataprotection/azext_dataprotection/manual/_help.py @@ -291,3 +291,18 @@ - name: List of Recovery Points in a Vault text: az dataprotection recovery-point list --backup-instance-name "sample_biname-00000000-0000-0000-0000-000000000000" --resource-group "sample_rg" --vault-name "sample_vault" """ + +helps['dataprotection enable-backup'] = """ + type: group + short-summary: Enable backup for Azure resources. +""" + +helps['dataprotection enable-backup trigger'] = """ + type: command + short-summary: Enable backup for an AKS cluster by setting up all required resources including backup vault, policy, storage account, extension, and trusted access. + examples: + - name: Enable backup for an AKS cluster with default Week strategy + text: az dataprotection enable-backup trigger --datasource-type AzureKubernetesService --datasource-id /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ContainerService/managedClusters/{cluster} + - name: Enable backup with Month strategy + text: az dataprotection enable-backup trigger --datasource-type AzureKubernetesService --datasource-id /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ContainerService/managedClusters/{cluster} --backup-strategy Month +""" diff --git a/src/dataprotection/azext_dataprotection/manual/_params.py b/src/dataprotection/azext_dataprotection/manual/_params.py index 7eec015f616..7ca86045e42 100644 --- a/src/dataprotection/azext_dataprotection/manual/_params.py +++ b/src/dataprotection/azext_dataprotection/manual/_params.py @@ -182,16 +182,16 @@ def load_arguments(self, _): 'json-string/@json-file. 
Required when --operation is Backup') c.argument('restore_request_object', type=validate_file_or_dict, help='Request body for operation "Restore" Expected value: ' 'json-string/@json-file. Required when --operation is Restore') - - ## Enable Backup command + + # Enable Backup command with self.argument_context('dataprotection enable-backup trigger') as c: c.argument('datasource_type', type=str, help="The type of datasource to be backed up. Supported values: AzureKubernetesService.") c.argument('datasource_id', type=str, help="The full ARM resource ID of the datasource to be backed up.") - c.argument('backup_strategy', arg_type=get_enum_type(get_all_backup_strategies()), + c.argument('backup_strategy', arg_type=get_enum_type(get_all_backup_strategies()), help="Backup strategy preset. For AzureKubernetesService: Week (7-day retention), Month (30-day retention), " "Immutable (7-day Op + 90-day Vault Tier), DisasterRecovery (GRS+CRR), Custom (bring your own vault/policy). " "Default: Week.") - c.argument('backup_configuration_file', type=validate_file_or_dict, + c.argument('backup_configuration_file', type=validate_file_or_dict, options_list=['--backup-configuration-file', '-f'], help="Path to backup configuration file (JSON) or inline JSON string. 
" "Available settings: storageAccountResourceId, blobContainerName, backupResourceGroupId, " diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index 54d68ef82f2..8d07f7bdf04 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -9,10 +9,10 @@ AKS_BACKUP_TAG_KEY = "AKSAzureBackup" -def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", max_retries=3, retry_delay=10): +def _check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", max_retries=3, retry_delay=10): """ Check if a role assignment already exists, and create it if not. - + Args: cmd: CLI command context role: Role name (e.g., 'Contributor', 'Reader', 'Storage Blob Data Contributor') @@ -21,14 +21,14 @@ def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity" identity_name: Friendly name of the identity for error messages max_retries: Max retries for transient failures (like identity not propagated yet) retry_delay: Delay in seconds between retries - + Returns: True if role was assigned (new or existing), raises on failure """ import time from azure.cli.command_modules.role.custom import list_role_assignments, create_role_assignment from azure.core.exceptions import HttpResponseError - + # Check if role assignment already exists try: existing_assignments = list_role_assignments( @@ -38,14 +38,14 @@ def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity" scope=scope, include_inherited=True ) - + if existing_assignments: print(f"\tRole '{role}' already assigned to {identity_name}") return True except Exception: # If we can't list, we'll try to create and handle any errors there pass - + # Try to create the role assignment with retries for transient failures last_error = None for attempt in range(max_retries): @@ -61,12 +61,12 
@@ def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity" except (HttpResponseError, Exception) as e: error_message = str(e) last_error = error_message - + # Check if this is a "already exists" conflict (409) if "already exists" in error_message.lower() or "conflict" in error_message.lower(): print(f"\tRole '{role}' already assigned to {identity_name}") return True - + # Check if this is a permission/authorization error (not retryable) if "authorization" in error_message.lower() or "forbidden" in error_message.lower() or "permission" in error_message.lower(): raise InvalidArgumentValueError( @@ -76,17 +76,17 @@ def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity" f" az role assignment create --role \"{role}\" --assignee \"{assignee}\" --scope \"{scope}\"\n\n" f"After the role is assigned, re-run this command." ) - + # Check if this is a "principal not found" error (retryable - identity propagation) if "cannot find" in error_message.lower() or "does not exist" in error_message.lower() or "principal" in error_message.lower(): if attempt < max_retries - 1: print(f"\tWaiting for identity to propagate... (attempt {attempt + 1}/{max_retries})") time.sleep(retry_delay) continue - + # For other errors, don't retry break - + # If we get here, we've exhausted retries or hit a non-retryable error raise InvalidArgumentValueError( f"Failed to assign '{role}' role to {identity_name}.\n" @@ -97,10 +97,10 @@ def __check_and_assign_role(cmd, role, assignee, scope, identity_name="identity" ) -def __validate_request(datasource_id, backup_strategy, configuration_params): +def _validate_request(datasource_id, backup_strategy, configuration_params): """ Validate the request parameters. Raises InvalidArgumentValueError on validation failure. 
- + Args: datasource_id: Full ARM resource ID of the AKS cluster backup_strategy: Backup strategy (Week, Month, Immutable, DisasterRecovery, Custom) @@ -115,14 +115,14 @@ def __validate_request(datasource_id, backup_strategy, configuration_params): # Ensure configuration_params is a dict if configuration_params is None: configuration_params = {} - + # Parse if string if isinstance(configuration_params, str): try: json.loads(configuration_params) except json.JSONDecodeError: raise InvalidArgumentValueError("Invalid JSON in backup-configuration-file") - + # Validate Custom strategy requirements if backup_strategy == 'Custom': if not configuration_params.get("backupVaultId"): @@ -147,7 +147,7 @@ def __validate_request(datasource_id, backup_strategy, configuration_params): f"backupResourceGroupId must be in the same subscription as the cluster. " f"Cluster subscription: {cluster_subscription_id}, Resource group subscription: {rg_parts['subscription']}" ) - + storage_account_id = configuration_params.get("storageAccountResourceId") if storage_account_id: sa_parts = parse_resource_id(storage_account_id) @@ -156,7 +156,7 @@ def __validate_request(datasource_id, backup_strategy, configuration_params): f"storageAccountResourceId must be in the same subscription as the cluster. " f"Cluster subscription: {cluster_subscription_id}, Storage account subscription: {sa_parts['subscription']}" ) - + backup_vault_id = configuration_params.get("backupVaultId") if backup_vault_id: vault_parts = parse_resource_id(backup_vault_id) @@ -167,25 +167,25 @@ def __validate_request(datasource_id, backup_strategy, configuration_params): ) -def __check_existing_backup_instance(resource_client, datasource_id, cluster_name): +def _check_existing_backup_instance(resource_client, datasource_id, cluster_name): """ Check if a backup instance already exists for this cluster using extension routing. 
- + Calls: GET {datasource_id}/providers/Microsoft.DataProtection/backupInstances - + Returns: None if no backup instance exists, raises error with details if one exists """ - print(f"\tChecking for existing backup configuration...") - + print("\tChecking for existing backup configuration...") + try: # Use extension routing to query backup instances on the cluster extension_resource_id = f"{datasource_id}/providers/Microsoft.DataProtection/backupInstances" response = resource_client.resources.get_by_id( - extension_resource_id, + extension_resource_id, api_version="2024-04-01" ) - + # Parse the response to get backup instances list bi_list = [] if hasattr(response, 'value'): @@ -194,17 +194,17 @@ def __check_existing_backup_instance(resource_client, datasource_id, cluster_nam props = response.additional_properties if isinstance(props, dict) and 'value' in props: bi_list = props['value'] if props['value'] else [] - + # If list is empty, no backup instance exists if not bi_list: - print(f"\tNo existing backup instance found") + print("\tNo existing backup instance found") return None - + # Get details of the first backup instance bi = bi_list[0] if isinstance(bi_list, list) else bi_list bi_id = bi.get('id', 'Unknown') if isinstance(bi, dict) else getattr(bi, 'id', 'Unknown') bi_name = bi.get('name', 'Unknown') if isinstance(bi, dict) else getattr(bi, 'name', 'Unknown') - + # Get protection status from properties bi_properties = bi.get('properties', {}) if isinstance(bi, dict) else getattr(bi, 'properties', {}) if isinstance(bi_properties, dict): @@ -213,7 +213,7 @@ def __check_existing_backup_instance(resource_client, datasource_id, cluster_nam else: protection_status = getattr(bi_properties, 'current_protection_state', 'Unknown') protection_error = getattr(bi_properties, 'protection_error_details', None) - + # Parse vault info from the BI resource ID # Format: 
/subscriptions/.../resourceGroups/.../providers/Microsoft.DataProtection/backupVaults/{vault}/backupInstances/{bi} vault_name = "Unknown" @@ -222,19 +222,19 @@ def __check_existing_backup_instance(resource_client, datasource_id, cluster_nam bi_parts = parse_resource_id(bi_id) vault_name = bi_parts.get('name', 'Unknown') vault_rg = bi_parts.get('resource_group', 'Unknown') - - print(f"\tFound existing backup instance!") + + print("\tFound existing backup instance!") print(f"\t\t- Backup Instance: {bi_name}") print(f"\t\t- Backup Vault: {vault_name}") print(f"\t\t- Resource Group: {vault_rg}") print(f"\t\t- Protection State: {protection_status}") - + error_info = "" if protection_error: error_msg = protection_error.get('message', str(protection_error)) if isinstance(protection_error, dict) else str(protection_error) print(f"\t\t- Error Details: {error_msg[:100]}..." if len(str(error_msg)) > 100 else f" - Error Details: {error_msg}") error_info = f"\n Protection Error: {error_msg}\n" - + raise InvalidArgumentValueError( f"Cluster '{cluster_name}' is already protected by a backup instance.\n\n" f"Existing Backup Configuration:\n" @@ -250,7 +250,7 @@ def __check_existing_backup_instance(resource_client, datasource_id, cluster_nam f" --yes\n\n" f"Then re-run this command." 
) - + except InvalidArgumentValueError: # Re-raise our own error raise @@ -258,32 +258,32 @@ def __check_existing_backup_instance(resource_client, datasource_id, cluster_nam # 404 or other errors mean no backup instance exists - that's fine error_str = str(e).lower() if "not found" in error_str or "404" in error_str or "does not exist" in error_str: - print(f"\tNo existing backup instance found") + print("\tNo existing backup instance found") return None # For other errors, log and continue (don't block on extension routing failures) print(f"\tCould not check for existing backup (will proceed): {str(e)[:100]}") return None - - print(f"\tNo existing backup instance found") + + print("\tNo existing backup instance found") return None -def __validate_cluster(resource_client, datasource_id, cluster_name): +def _validate_cluster(resource_client, datasource_id, cluster_name): """Validate the AKS cluster exists and get its details.""" cluster_resource = resource_client.resources.get_by_id(datasource_id, api_version="2024-08-01") cluster_location = cluster_resource.location print(f"\tCluster: {cluster_name}") print(f"\tLocation: {cluster_location}") - print(f"\t[OK] Cluster validated") + print("\t[OK] Cluster validated") return cluster_resource, cluster_location -def __find_existing_backup_resource_group(resource_client, cluster_location): +def _find_existing_backup_resource_group(resource_client, cluster_location): """ Search for an existing AKS backup resource group in the subscription by tag. 
- + Looks for resource groups with tag: AKSAzureBackup = - + Returns: resource_group if found, None otherwise """ @@ -301,7 +301,7 @@ def __find_existing_backup_resource_group(resource_client, cluster_location): return None -def __setup_resource_group(cmd, resource_client, backup_resource_group_id, cluster_location, cluster_name, cluster_identity_principal_id, resource_tags): +def _setup_resource_group(cmd, resource_client, backup_resource_group_id, cluster_location, cluster_name, cluster_identity_principal_id, resource_tags): """Create or use backup resource group.""" if backup_resource_group_id: backup_resource_group_name = parse_resource_id(backup_resource_group_id)['resource_group'] @@ -316,43 +316,43 @@ def __setup_resource_group(cmd, resource_client, backup_resource_group_id, clust else: # Search for existing backup resource group with matching tag print(f"\tSearching for existing AKS backup resource group in region {cluster_location}...") - backup_resource_group = __find_existing_backup_resource_group(resource_client, cluster_location) - + backup_resource_group = _find_existing_backup_resource_group(resource_client, cluster_location) + if backup_resource_group: # Found existing resource group - reuse it backup_resource_group_name = backup_resource_group.name print(f"\tFound existing backup resource group: {backup_resource_group_name}") else: # Create new resource group with AKS backup tag - backup_resource_group_name = __generate_backup_resource_group_name(cluster_location) + backup_resource_group_name = _generate_backup_resource_group_name(cluster_location) print(f"\tCreating resource group: {backup_resource_group_name}") - + # Build tags - include AKS backup tag plus any user-provided tags rg_tags = {AKS_BACKUP_TAG_KEY: cluster_location} if resource_tags: rg_tags.update(resource_tags) - + rg_params = {"location": cluster_location, "tags": rg_tags} backup_resource_group = resource_client.resource_groups.create_or_update(backup_resource_group_name, 
rg_params) - + print(f"\tResource Group: {backup_resource_group.id}") - __check_and_assign_role( + _check_and_assign_role( cmd, role="Contributor", assignee=cluster_identity_principal_id, scope=backup_resource_group.id, identity_name="cluster identity") - print(f"\t[OK] Resource group ready") - + print("\t[OK] Resource group ready") + return backup_resource_group, backup_resource_group_name -def __find_existing_backup_storage_account(storage_client, cluster_location): +def _find_existing_backup_storage_account(storage_client, cluster_location): """ Search for an existing AKS backup storage account in the subscription by tag. - + Looks for storage accounts with tag: AKSAzureBackup = - + Returns: tuple: (storage_account, resource_group_name) if found, (None, None) otherwise """ @@ -372,13 +372,13 @@ def __find_existing_backup_storage_account(storage_client, cluster_location): return None, None -def __setup_storage_account(cmd, cluster_subscription_id, storage_account_id, blob_container_name, backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags): +def _setup_storage_account(cmd, cluster_subscription_id, storage_account_id, blob_container_name, backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags): """Create or use storage account.""" from azure.mgmt.storage import StorageManagementClient - + storage_client = get_mgmt_service_client(cmd.cli_ctx, StorageManagementClient, subscription_id=cluster_subscription_id) storage_account_rg = backup_resource_group_name # Default to backup RG - + if storage_account_id: # Use provided storage account sa_parts = parse_resource_id(storage_account_id) @@ -386,12 +386,12 @@ def __setup_storage_account(cmd, cluster_subscription_id, storage_account_id, bl storage_account_rg = sa_parts['resource_group'] print(f"\tUsing provided storage account: {backup_storage_account_name}") backup_storage_account = 
storage_client.storage_accounts.get_properties(storage_account_rg, backup_storage_account_name) - backup_storage_account_container_name = blob_container_name if blob_container_name else __generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) + backup_storage_account_container_name = blob_container_name if blob_container_name else _generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) else: # Search for existing backup storage account with matching tag print(f"\tSearching for existing AKS backup storage account in region {cluster_location}...") - backup_storage_account, existing_rg = __find_existing_backup_storage_account(storage_client, cluster_location) - + backup_storage_account, existing_rg = _find_existing_backup_storage_account(storage_client, cluster_location) + if backup_storage_account: # Found existing storage account - reuse it backup_storage_account_name = backup_storage_account.name @@ -399,14 +399,14 @@ def __setup_storage_account(cmd, cluster_subscription_id, storage_account_id, bl print(f"\tFound existing backup storage account: {backup_storage_account_name}") else: # Create new storage account with AKS backup tag - backup_storage_account_name = __generate_backup_storage_account_name(cluster_location) + backup_storage_account_name = _generate_backup_storage_account_name(cluster_location) print(f"\tCreating storage account: {backup_storage_account_name}") - + # Build tags - include AKS backup tag plus any user-provided tags sa_tags = {AKS_BACKUP_TAG_KEY: cluster_location} if resource_tags: sa_tags.update(resource_tags) - + storage_params = { "location": cluster_location, "kind": "StorageV2", @@ -418,20 +418,20 @@ def __setup_storage_account(cmd, cluster_subscription_id, storage_account_id, bl resource_group_name=backup_resource_group_name, account_name=backup_storage_account_name, parameters=storage_params).result() - - backup_storage_account_container_name = 
__generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) - + + backup_storage_account_container_name = _generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) + print(f"\tStorage Account: {backup_storage_account.id}") print(f"\tCreating blob container: {backup_storage_account_container_name}") storage_client.blob_containers.create(storage_account_rg, backup_storage_account_name, backup_storage_account_container_name, {}) - print(f"\t[OK] Storage account ready") - + print("\t[OK] Storage account ready") + return backup_storage_account, backup_storage_account_name, backup_storage_account_container_name -def __install_backup_extension(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_storage_account_name, backup_storage_account_container_name, backup_resource_group_name, backup_storage_account): +def _install_backup_extension(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_storage_account_name, backup_storage_account_container_name, backup_resource_group_name, backup_storage_account): """Install backup extension on the cluster.""" - backup_extension = __create_backup_extension( + backup_extension = _create_backup_extension( cmd, cluster_subscription_id, cluster_resource_group_name, @@ -441,32 +441,32 @@ def __install_backup_extension(cmd, cluster_subscription_id, cluster_resource_gr backup_resource_group_name, cluster_subscription_id) - __check_and_assign_role( + _check_and_assign_role( cmd, role="Storage Blob Data Contributor", assignee=backup_extension.aks_assigned_identity.principal_id, scope=backup_storage_account.id, identity_name="backup extension identity") - print(f"\t[OK] Backup extension ready") - + print("\t[OK] Backup extension ready") + return backup_extension -def __find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location): +def _find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location): """ 
Search for an existing AKS backup vault in the subscription by tag. - + Looks for backup vaults with tag: AKSAzureBackup = - + Returns: backup_vault if found, None otherwise """ from azext_dataprotection.aaz.latest.dataprotection.backup_vault import List as _BackupVaultList - + try: # List all backup vaults in the subscription vaults = _BackupVaultList(cli_ctx=cmd.cli_ctx)(command_args={}) - + for vault in vaults: if vault.get('tags'): # Check if this vault has the AKS backup tag matching the location @@ -479,10 +479,10 @@ def __find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location) return None -def __setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, cluster_resource, backup_resource_group, resource_tags): +def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, cluster_resource, backup_resource_group, resource_tags): """Create or use backup vault.""" from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Create as _BackupVaultCreate - + if backup_strategy == 'Custom' and backup_vault_id: # Use provided vault for Custom strategy vault_parts = parse_resource_id(backup_vault_id) @@ -497,22 +497,22 @@ def __setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscrip else: # Search for existing backup vault with matching tag print(f"\tSearching for existing AKS backup vault in region {cluster_location}...") - backup_vault = __find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location) - + backup_vault = _find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location) + if backup_vault: # Found existing vault - reuse it backup_vault_name = backup_vault['name'] print(f"\tFound existing backup vault: {backup_vault_name}") else: # Create new backup vault with AKS backup tag - backup_vault_name = __generate_backup_vault_name(cluster_location) 
+ backup_vault_name = _generate_backup_vault_name(cluster_location) print(f"\tCreating backup vault: {backup_vault_name}") - + # Build tags - include AKS backup tag plus any user-provided tags vault_tags = {AKS_BACKUP_TAG_KEY: cluster_location} if resource_tags: vault_tags.update(resource_tags) - + backup_vault_args = { "vault_name": backup_vault_name, "resource_group": backup_resource_group_name, @@ -524,29 +524,29 @@ def __setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscrip backup_vault = _BackupVaultCreate(cli_ctx=cmd.cli_ctx)(command_args=backup_vault_args).result() print(f"\tBackup Vault: {backup_vault['id']}") - __check_and_assign_role( + _check_and_assign_role( cmd, role="Reader", assignee=backup_vault["identity"]["principalId"], scope=cluster_resource.id, identity_name="backup vault identity (on cluster)") - __check_and_assign_role( + _check_and_assign_role( cmd, role="Reader", assignee=backup_vault["identity"]["principalId"], scope=backup_resource_group.id, identity_name="backup vault identity (on resource group)") - print(f"\t[OK] Backup vault ready") - + print("\t[OK] Backup vault ready") + return backup_vault, backup_vault_name -def __setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy_id): +def _setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy_id): """Create or use backup policy.""" from azext_dataprotection.manual.aaz_operations.backup_policy import Create as _BackupPolicyCreate from azext_dataprotection.aaz.latest.dataprotection.backup_policy import List as _BackupPolicyList - + # Create or use backup policy if backup_strategy == 'Custom' and backup_policy_id: # Use provided policy for Custom strategy @@ -558,9 +558,9 @@ def __setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_ vault_rg_for_policy = backup_resource_group_name if 
backup_strategy == 'Custom' and backup_vault_id: vault_rg_for_policy = parse_resource_id(backup_vault_id)['resource_group'] - + # Check if policy already exists in this vault - backup_policy_name = __generate_backup_policy_name(backup_strategy) + backup_policy_name = _generate_backup_policy_name(backup_strategy) existing_policy = None try: policies = _BackupPolicyList(cli_ctx=cmd.cli_ctx)(command_args={ @@ -573,43 +573,43 @@ def __setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_ break except Exception: pass - + if existing_policy: print(f"\tFound existing backup policy: {backup_policy_name}") backup_policy = existing_policy else: # Create policy based on strategy - policy_config = __get_policy_config_for_strategy(backup_strategy) + policy_config = _get_policy_config_for_strategy(backup_strategy) print(f"\tCreating backup policy: {backup_policy_name}") - + backup_policy = _BackupPolicyCreate(cli_ctx=cmd.cli_ctx)(command_args={ "backup_policy_name": backup_policy_name, "resource_group": vault_rg_for_policy, "vault_name": backup_vault_name, "policy": policy_config }) - + print(f"\tBackup Policy: {backup_policy.get('id', backup_policy_id if backup_policy_id else 'N/A')}") - print(f"\t[OK] Backup policy ready") - + print("\t[OK] Backup policy ready") + return backup_policy -def __setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_vault): +def _setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_vault): """Setup trusted access role binding between backup vault and cluster.""" from azext_dataprotection.vendored_sdks.azure_mgmt_containerservice import ContainerServiceClient - from azext_dataprotection.vendored_sdks.azure_mgmt_containerservice.v2024_07_01.models import TrustedAccessRoleBinding - + from azext_dataprotection.vendored_sdks.azure_mgmt_containerservice.models import TrustedAccessRoleBinding + cluster_client = 
get_mgmt_service_client(cmd.cli_ctx, ContainerServiceClient, subscription_id=cluster_subscription_id) vault_id = backup_vault["id"] vault_name = backup_vault["name"] - - print(f"\tConfiguring trusted access between:") + + print("\tConfiguring trusted access between:") print(f"\t\t- Backup Vault: {vault_name}") print(f"\t\t- AKS Cluster: {cluster_name}") - + # Check if trusted access binding already exists for this vault-cluster pair - print(f"\tChecking for existing trusted access binding...") + print("\tChecking for existing trusted access binding...") try: existing_bindings = cluster_client.trusted_access_role_bindings.list( resource_group_name=cluster_resource_group_name, @@ -618,17 +618,17 @@ def __setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_ for binding in existing_bindings: if binding.source_resource_id.lower() == vault_id.lower(): print(f"\tFound existing binding: {binding.name}") - print(f"\t[OK] Trusted access already configured") + print("\t[OK] Trusted access already configured") return except Exception: # If we can't list, we'll try to create pass - + # Create new trusted access role binding with GUID-based name - binding_name = __generate_trusted_access_role_binding_name() + binding_name = _generate_trusted_access_role_binding_name() print(f"\tCreating trusted access role binding: {binding_name}") - print(f"\t\tRole: Microsoft.DataProtection/backupVaults/backup-operator") - + print("\t\tRole: Microsoft.DataProtection/backupVaults/backup-operator") + _trusted_access_role_binding = TrustedAccessRoleBinding( source_resource_id=vault_id, roles=["Microsoft.DataProtection/backupVaults/backup-operator"]) @@ -638,26 +638,26 @@ def __setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_ resource_name=cluster_name, trusted_access_role_binding_name=binding_name, trusted_access_role_binding=_trusted_access_role_binding).result() - print(f"\t[OK] Trusted access configured - vault can now access cluster for backup 
operations") + print("\t[OK] Trusted access configured - vault can now access cluster for backup operations") -def __create_backup_instance(cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group): +def _create_backup_instance(cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group): """Create backup instance.""" from azext_dataprotection.manual.aaz_operations.backup_instance import ValidateAndCreate as _BackupInstanceValidateAndCreate import uuid backup_instance_name = f"{cluster_name}-{str(uuid.uuid4())[:8]}" - + # Get vault RG for backup instance - use backup RG unless custom vault provided vault_rg_for_bi = backup_resource_group_name if backup_strategy == 'Custom' and backup_vault_id: vault_rg_for_bi = parse_resource_id(backup_vault_id)['resource_group'] - + # Get policy ID policy_id_for_bi = backup_policy.get("id") if isinstance(backup_policy, dict) else backup_policy_id - + print(f"\tCreating backup instance: {backup_instance_name}") - backup_instance_payload = __get_backup_instance_payload( + backup_instance_payload = _get_backup_instance_payload( backup_instance_name=backup_instance_name, cluster_name=cluster_name, datasource_id=datasource_id, @@ -665,36 +665,36 @@ def __create_backup_instance(cmd, cluster_name, cluster_resource_group_name, dat policy_id=policy_id_for_bi, backup_resource_group_id=backup_resource_group.id ) - + backup_instance = _BackupInstanceValidateAndCreate(cli_ctx=cmd.cli_ctx)(command_args={ "backup_instance_name": backup_instance_name, "resource_group": vault_rg_for_bi, "vault_name": backup_vault_name, "backup_instance": backup_instance_payload }).result() - + # Check and report the protection state protection_state = 
backup_instance.get('properties', {}).get('currentProtectionState', 'Unknown') print(f"\tProtection State: {protection_state}") - + if protection_state == "ProtectionConfigured": - print(f"\t[OK] Backup instance created and protection configured") + print("\t[OK] Backup instance created and protection configured") elif protection_state == "ConfiguringProtection": - print(f"\t[OK] Backup instance created - protection configuration in progress") + print("\t[OK] Backup instance created - protection configuration in progress") elif protection_state == "ProtectionError": error_details = backup_instance.get('properties', {}).get('protectionErrorDetails', {}) error_msg = error_details.get('message', 'Unknown error') if isinstance(error_details, dict) else str(error_details) print(f"\t[WARNING] Backup instance created but protection has errors: {error_msg}") else: - print(f"\t[OK] Backup instance created") - + print("\t[OK] Backup instance created") + return backup_instance, policy_id_for_bi def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy='Week', configuration_params=None): """ Enable backup for an AKS cluster. 
- + Args: cmd: CLI command context datasource_id: Full ARM resource ID of the AKS cluster @@ -706,16 +706,16 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy print("=" * 60) print(f"Datasource ID: {datasource_id}") print(f"Backup Strategy: {backup_strategy}") - + # Parse configuration_params if configuration_params is None: configuration_params = {} if isinstance(configuration_params, str): configuration_params = json.loads(configuration_params) - + # Validate request (raises on failure) - __validate_request(datasource_id, backup_strategy, configuration_params) - + _validate_request(datasource_id, backup_strategy, configuration_params) + # Extract configuration values (camelCase keys) resource_tags = configuration_params.get("tags") storage_account_id = configuration_params.get("storageAccountResourceId") @@ -723,66 +723,66 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy backup_resource_group_id = configuration_params.get("backupResourceGroupId") backup_vault_id = configuration_params.get("backupVaultId") backup_policy_id = configuration_params.get("backupPolicyId") - + # Parse cluster details from resource ID cluster_id_parts = parse_resource_id(datasource_id) cluster_subscription_id = cluster_id_parts['subscription'] cluster_resource_group_name = cluster_id_parts['resource_group'] cluster_name = cluster_id_parts['name'] - + if resource_tags: print(f"Resource Tags: {json.dumps(resource_tags)}") from azure.mgmt.resource import ResourceManagementClient resource_client = get_mgmt_service_client(cmd.cli_ctx, ResourceManagementClient, subscription_id=cluster_subscription_id) - + # Pre-check: Verify no existing backup instance for this cluster - print(f"\n[Pre-check] Checking for existing backup...") - __check_existing_backup_instance(resource_client, datasource_id, cluster_name) - + print("\n[Pre-check] Checking for existing backup...") + _check_existing_backup_instance(resource_client, 
datasource_id, cluster_name) + # Step 1: Validate cluster - print(f"\n[1/8] Validating cluster...") - cluster_resource, cluster_location = __validate_cluster(resource_client, datasource_id, cluster_name) - + print("\n[1/8] Validating cluster...") + cluster_resource, cluster_location = _validate_cluster(resource_client, datasource_id, cluster_name) + # Step 2: Setup resource group - print(f"\n[2/8] Setting up backup resource group...") - backup_resource_group, backup_resource_group_name = __setup_resource_group( + print("\n[2/8] Setting up backup resource group...") + backup_resource_group, backup_resource_group_name = _setup_resource_group( cmd, resource_client, backup_resource_group_id, cluster_location, cluster_name, cluster_resource.identity.principal_id, resource_tags) - + # Step 3: Setup storage account - print(f"\n[3/8] Setting up storage account...") - backup_storage_account, backup_storage_account_name, backup_storage_account_container_name = __setup_storage_account( + print("\n[3/8] Setting up storage account...") + backup_storage_account, backup_storage_account_name, backup_storage_account_container_name = _setup_storage_account( cmd, cluster_subscription_id, storage_account_id, blob_container_name, backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags) - + # Step 4: Install backup extension - print(f"\n[4/8] Installing backup extension...") - __install_backup_extension( + print("\n[4/8] Installing backup extension...") + _install_backup_extension( cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_storage_account_name, backup_storage_account_container_name, backup_resource_group_name, backup_storage_account) - + # Step 5: Setup backup vault - print(f"\n[5/8] Setting up backup vault...") - backup_vault, backup_vault_name = __setup_backup_vault( + print("\n[5/8] Setting up backup vault...") + backup_vault, backup_vault_name = _setup_backup_vault( cmd, backup_strategy, 
backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, cluster_resource, backup_resource_group, resource_tags) - + # Step 6: Setup backup policy - print(f"\n[6/8] Setting up backup policy...") - backup_policy = __setup_backup_policy( + print("\n[6/8] Setting up backup policy...") + backup_policy = _setup_backup_policy( cmd, backup_vault, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy_id) - + # Step 7: Setup trusted access - print(f"\n[7/8] Setting up trusted access...") - __setup_trusted_access( + print("\n[7/8] Setting up trusted access...") + _setup_trusted_access( cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_vault) - + # Step 8: Create backup instance - print(f"\n[8/8] Configuring backup instance...") - backup_instance, policy_id_for_bi = __create_backup_instance( + print("\n[8/8] Configuring backup instance...") + backup_instance, policy_id_for_bi = _create_backup_instance( cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group) @@ -799,9 +799,9 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy print("=" * 60) -def __get_policy_config_for_strategy(backup_strategy): +def _get_policy_config_for_strategy(backup_strategy): """Get backup policy configuration based on strategy. 
- + Strategies: - Week: 7 days operational tier, 7 days vault tier - Month: 30 days operational tier, 30 days vault tier @@ -811,7 +811,7 @@ def __get_policy_config_for_strategy(backup_strategy): # Operational tier retention based on strategy op_tier_retention = "P7D" # Week default vault_tier_retention = "P7D" # Week default - + if backup_strategy == 'Week': op_tier_retention = "P7D" vault_tier_retention = "P7D" @@ -824,7 +824,7 @@ def __get_policy_config_for_strategy(backup_strategy): elif backup_strategy == 'DisasterRecovery': op_tier_retention = "P7D" vault_tier_retention = "P90D" # 90 days for DR scenarios - + policy_rules = [ # Operational Store Default Retention Rule { @@ -911,7 +911,7 @@ def __get_policy_config_for_strategy(backup_strategy): } } ] - + return { "objectType": "BackupPolicy", "datasourceTypes": [ @@ -921,7 +921,7 @@ def __get_policy_config_for_strategy(backup_strategy): } -def __get_backup_instance_payload(backup_instance_name, cluster_name, datasource_id, cluster_location, policy_id, backup_resource_group_id): +def _get_backup_instance_payload(backup_instance_name, cluster_name, datasource_id, cluster_location, policy_id, backup_resource_group_id): """Get backup instance payload for AKS cluster.""" return { "backup_instance_name": backup_instance_name, @@ -969,34 +969,34 @@ def __get_backup_instance_payload(backup_instance_name, cluster_name, datasource } -def __generate_arm_id(subscription_id, resource_group_name, resource_type, resource_name): +def _generate_arm_id(subscription_id, resource_group_name, resource_type, resource_name): return f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/{resource_type}/{resource_name}" -def __generate_backup_resource_group_name(cluster_location): +def _generate_backup_resource_group_name(cluster_location): """ Generate backup resource group name (one per region, shared across clusters). 
- + Naming constraints: - Length: 1-90 characters - Allowed characters: alphanumerics, underscores, parentheses, hyphens, periods - Cannot end with a period - + Format: AKSAzureBackup_ (one resource group per region) Example: AKSAzureBackup_eastasia """ return f"AKSAzureBackup_{cluster_location}" -def __generate_backup_storage_account_name(cluster_location): +def _generate_backup_storage_account_name(cluster_location): """ Generate backup storage account name (one per region, shared across clusters). - + Naming constraints: - Length: 3-24 characters - Allowed characters: lowercase letters and numbers only - Must be globally unique - + Format: aksbkp (one storage account per region) Example: aksbkpeastasia1a2b3c """ @@ -1010,21 +1010,21 @@ def __generate_backup_storage_account_name(cluster_location): return f"aksbkp{sanitized_location}{guid_suffix}" -def __generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name): +def _generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name): """ Generate backup blob container name (unique per cluster). 
- + Naming constraints: - Length: 3-63 characters - Allowed characters: lowercase letters, numbers, and hyphens - Must start with a letter or number - Cannot contain consecutive hyphens - + Format: - Example: contoso-aks-hack-contoso-aks-rg """ import re - + def sanitize(name): # Lowercase, replace invalid chars with hyphens sanitized = re.sub(r'[^a-z0-9-]', '-', name.lower()) @@ -1032,52 +1032,52 @@ def sanitize(name): sanitized = re.sub(r'-+', '-', sanitized) # Remove leading/trailing hyphens return sanitized.strip('-') - + sanitized_cluster = sanitize(cluster_name) sanitized_rg = sanitize(cluster_resource_group_name) - + # Combine and truncate to 63 chars max container_name = f"{sanitized_cluster}-{sanitized_rg}" return container_name[:63].rstrip('-') -def __generate_backup_vault_name(cluster_location): +def _generate_backup_vault_name(cluster_location): """ Generate backup vault name (one per region, shared across clusters). - + Naming constraints: - Length: 2-50 characters - Allowed characters: alphanumerics and hyphens - Must start with a letter - Cannot end with a hyphen - + Format: AKSAzureBackup- (one vault per region) Example: AKSAzureBackup-eastasia """ return f"AKSAzureBackup-{cluster_location}" -def __generate_backup_policy_name(backup_strategy): +def _generate_backup_policy_name(backup_strategy): """ Generate backup policy name (shared per strategy). - + Naming constraints: - Length: 3-150 characters - Allowed characters: alphanumerics and hyphens - + Format: AKSBackupPolicy- """ return f"AKSBackupPolicy-{backup_strategy}" -def __generate_trusted_access_role_binding_name(): +def _generate_trusted_access_role_binding_name(): """ Generate trusted access role binding name using GUID. 
- + Naming constraints: - Length: 1-24 characters - Allowed characters: alphanumerics, underscores, hyphens - + Format: tarb- Example: tarb-a1b2c3d4e5f6g7h8 """ @@ -1087,7 +1087,8 @@ def __generate_trusted_access_role_binding_name(): # "tarb-" (5 chars) + guid (16 chars) = 21 chars return f"tarb-{guid_suffix}" -def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster_name, storage_account_name, storage_account_container_name, storage_account_resource_group, storage_account_subscription_id): + +def _create_backup_extension(cmd, subscription_id, resource_group_name, cluster_name, storage_account_name, storage_account_container_name, storage_account_resource_group, storage_account_subscription_id): from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration import SourceControlConfigurationClient k8s_configuration_client = get_mgmt_service_client(cmd.cli_ctx, SourceControlConfigurationClient, subscription_id=subscription_id) @@ -1095,8 +1096,8 @@ def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster cluster_rp="Microsoft.ContainerService", cluster_resource_name="managedClusters", resource_group_name=resource_group_name, - cluster_name=cluster_name) - + cluster_name=cluster_name) + for page in extensions.by_page(): for extension in page: if extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': @@ -1121,8 +1122,8 @@ def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster f"Please wait for the operation to complete and try again." 
) - print(f"\tInstalling data protection extension (azure-aks-backup)...") - + print("\tInstalling data protection extension (azure-aks-backup)...") + from azure.cli.core.extension.operations import add_extension_to_path from importlib import import_module add_extension_to_path("k8s-extension") @@ -1148,11 +1149,11 @@ def __create_backup_extension(cmd, subscription_id, resource_group_name, cluster "storageAccountSubscriptionId": storage_account_subscription_id }] ).result() - + # Verify extension is in healthy state after installation if extension.provisioning_state == "Succeeded": - print(f"\tExtension installed and healthy (Provisioning State: Succeeded)") + print("\tExtension installed and healthy (Provisioning State: Succeeded)") else: print(f"\tWarning: Extension provisioning state is '{extension.provisioning_state}'") - - return extension \ No newline at end of file + + return extension diff --git a/src/dataprotection/azext_dataprotection/manual/commands.py b/src/dataprotection/azext_dataprotection/manual/commands.py index 2bc02116101..e58a1e36968 100644 --- a/src/dataprotection/azext_dataprotection/manual/commands.py +++ b/src/dataprotection/azext_dataprotection/manual/commands.py @@ -131,5 +131,4 @@ def load_command_table(self, _): g.custom_command('list', 'dataprotection_recovery_point_list') with self.command_group('dataprotection enable-backup', exception_handler=exception_handler) as g: - g.custom_command('trigger', 'dataprotection_enable_backup', supports_no_wait=True) - \ No newline at end of file + g.custom_command('trigger', 'dataprotection_enable_backup') diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index 673eeec8e5a..388bec4887f 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -1151,52 +1151,53 @@ def restore_initialize_for_item_recovery(cmd, datasource_type, source_datastore, 
return restore_request + def dataprotection_enable_backup(cmd, datasource_type, datasource_id, backup_strategy=None, backup_configuration_file=None): """Enable backup for a datasource using a single command. - + This command orchestrates all the steps required to enable backup: - Creates backup infrastructure (resource group, storage account, vault) - Installs required extensions - Configures backup instance with specified strategy """ from azext_dataprotection.manual.enums import get_backup_strategies_for_datasource - + # Supported datasource types supported_datasource_types = ["AzureKubernetesService"] - + # Validate datasource type is supported if datasource_type not in supported_datasource_types: raise InvalidArgumentValueError( f"Unsupported datasource type: {datasource_type}. " f"Supported types: {', '.join(supported_datasource_types)}" ) - + # Get valid strategies for this datasource type valid_strategies = get_backup_strategies_for_datasource(datasource_type) - + # Set default strategy based on datasource type if backup_strategy is None: if datasource_type == "AzureKubernetesService": backup_strategy = 'Week' # Add defaults for other datasource types here as they are supported - + # Validate strategy for datasource type if backup_strategy not in valid_strategies: raise InvalidArgumentValueError( f"Invalid backup-strategy '{backup_strategy}' for {datasource_type}. 
" f"Allowed values: {', '.join(valid_strategies)}" ) - + # Parse configuration from file or dict config = _parse_backup_configuration(backup_configuration_file) - + # Route to datasource-specific handler if datasource_type == "AzureKubernetesService": if "Microsoft.ContainerService/managedClusters".lower() not in datasource_id.lower(): raise InvalidArgumentValueError( "datasource-id must be an AKS cluster resource ID for AzureKubernetesService datasource type" ) - + from azext_dataprotection.manual.aks.aks_helper import dataprotection_enable_backup_helper dataprotection_enable_backup_helper(cmd, datasource_id, backup_strategy, config) return @@ -1204,25 +1205,25 @@ def dataprotection_enable_backup(cmd, datasource_type, datasource_id, backup_str def _parse_backup_configuration(backup_configuration_file): """Parse backup configuration from file or dict into a dictionary. - + Args: backup_configuration_file: Can be: - None: Returns empty dict - dict: Returns as-is (already parsed by validate_file_or_dict) - str: JSON string to parse - + Returns: dict: Parsed configuration """ import json - + if backup_configuration_file is None: return {} - + # If it's already a dict, return as-is (validate_file_or_dict already parsed the file) if isinstance(backup_configuration_file, dict): return backup_configuration_file - + # If it's a string, try to parse as JSON if isinstance(backup_configuration_file, str): try: @@ -1231,5 +1232,5 @@ def _parse_backup_configuration(backup_configuration_file): raise InvalidArgumentValueError( f"Invalid JSON in backup-configuration-file: '{backup_configuration_file}'" ) - + return {} diff --git a/src/dataprotection/azext_dataprotection/manual/enums.py b/src/dataprotection/azext_dataprotection/manual/enums.py index fd2291356f8..f8750b9b27b 100644 --- a/src/dataprotection/azext_dataprotection/manual/enums.py +++ b/src/dataprotection/azext_dataprotection/manual/enums.py @@ -117,4 +117,4 @@ def get_backup_strategies_for_datasource(datasource_type): 
strategies = { "AzureKubernetesService": get_aks_backup_strategies(), } - return strategies.get(datasource_type, []) \ No newline at end of file + return strategies.get(datasource_type, []) diff --git a/src/dataprotection/setup.py b/src/dataprotection/setup.py index 0af4bc59016..b42d0a2c53f 100644 --- a/src/dataprotection/setup.py +++ b/src/dataprotection/setup.py @@ -10,7 +10,7 @@ from setuptools import setup, find_packages # HISTORY.rst entry. -VERSION = '1.8.0' +VERSION = '1.9.0' # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers From 105a57adb6663a3b116574839cd334b12ce1ed0a Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Tue, 10 Mar 2026 15:48:40 +0530 Subject: [PATCH 13/24] final changes Signed-off-by: Anshul Ahuja --- .../azext_dataprotection/manual/_help.py | 50 ++++ .../manual/aks/aks_helper.py | 236 +++++++++++++----- 2 files changed, 224 insertions(+), 62 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/_help.py b/src/dataprotection/azext_dataprotection/manual/_help.py index 6dd7c8d0167..021392e2bcc 100644 --- a/src/dataprotection/azext_dataprotection/manual/_help.py +++ b/src/dataprotection/azext_dataprotection/manual/_help.py @@ -300,9 +300,59 @@ helps['dataprotection enable-backup trigger'] = """ type: command short-summary: Enable backup for an AKS cluster by setting up all required resources including backup vault, policy, storage account, extension, and trusted access. + long-summary: | + This command orchestrates all the steps required to enable backup for an AKS cluster: + 1. Creates or reuses a backup resource group, storage account, and blob container + 2. Installs the backup extension on the cluster (or reuses an existing one) + 3. Creates or reuses a backup vault and backup policy + 4. Configures trusted access and role assignments + 5. 
Creates a backup instance + + The --backup-configuration-file parameter accepts a JSON file (@file.json) or inline JSON string with the following optional settings: + - storageAccountResourceId: ARM ID of an existing storage account to use + - blobContainerName: Name of an existing blob container (used with storageAccountResourceId) + - backupResourceGroupId: ARM ID of an existing resource group for backup resources + - backupVaultId: ARM ID of an existing backup vault (required for Custom strategy) + - backupPolicyId: ARM ID of an existing backup policy (required for Custom strategy) + - tags: Dictionary of tags to apply to created resources (e.g., {"Owner": "team", "Env": "prod"}) + + Backup strategy presets (--backup-strategy): + - Week (default): Daily backups with 7-day retention in both Operational and Vault tiers. + - Month: Daily backups with 30-day retention in both Operational and Vault tiers. + - Immutable: Daily backups with 7-day Operational tier + 30-day Vault tier retention. + - DisasterRecovery: Daily backups with 7-day Operational tier + 90-day Vault tier retention for cross-region restore scenarios. + - Custom: Bring your own vault and policy. Requires backupVaultId and backupPolicyId in --backup-configuration-file. 
examples: - name: Enable backup for an AKS cluster with default Week strategy text: az dataprotection enable-backup trigger --datasource-type AzureKubernetesService --datasource-id /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ContainerService/managedClusters/{cluster} - name: Enable backup with Month strategy text: az dataprotection enable-backup trigger --datasource-type AzureKubernetesService --datasource-id /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ContainerService/managedClusters/{cluster} --backup-strategy Month + - name: Enable backup with Custom strategy using existing vault and policy + text: | + az dataprotection enable-backup trigger --datasource-type AzureKubernetesService \\ + --datasource-id /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ContainerService/managedClusters/{cluster} \\ + --backup-strategy Custom \\ + --backup-configuration-file @config.json + + Where config.json contains: + { + "backupVaultId": "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.DataProtection/backupVaults/{vault}", + "backupPolicyId": "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.DataProtection/backupVaults/{vault}/backupPolicies/{policy}" + } + - name: Enable backup with resource tags for policy compliance + text: | + az dataprotection enable-backup trigger --datasource-type AzureKubernetesService \\ + --datasource-id /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ContainerService/managedClusters/{cluster} \\ + --backup-configuration-file '{"tags": {"Owner": "team", "Environment": "prod", "DeleteBy": "2026-12"}}' + - name: Enable backup using an existing storage account + text: | + az dataprotection enable-backup trigger --datasource-type AzureKubernetesService \\ + --datasource-id /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ContainerService/managedClusters/{cluster} \\ + --backup-configuration-file @config.json + + Where config.json contains: + { + 
"storageAccountResourceId": "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Storage/storageAccounts/{sa}", + "blobContainerName": "my-backup-container" + } """ diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index 8d07f7bdf04..49700557427 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -15,11 +15,11 @@ def _check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", Args: cmd: CLI command context - role: Role name (e.g., 'Contributor', 'Reader', 'Storage Blob Data Contributor') + role: Role name (e.g., 'Contributor', 'Reader') assignee: Principal ID of the identity to assign the role to scope: Resource ID scope for the role assignment - identity_name: Friendly name of the identity for error messages - max_retries: Max retries for transient failures (like identity not propagated yet) + identity_name: Friendly name for log messages + max_retries: Max retries for transient failures retry_delay: Delay in seconds between retries Returns: @@ -27,73 +27,52 @@ def _check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", """ import time from azure.cli.command_modules.role.custom import list_role_assignments, create_role_assignment - from azure.core.exceptions import HttpResponseError # Check if role assignment already exists try: - existing_assignments = list_role_assignments( - cmd, - assignee=assignee, - role=role, - scope=scope, - include_inherited=True - ) - - if existing_assignments: + if list_role_assignments(cmd, assignee=assignee, role=role, scope=scope, include_inherited=True): print(f"\tRole '{role}' already assigned to {identity_name}") return True - except Exception: - # If we can't list, we'll try to create and handle any errors there - pass + except Exception as e: + print(f"\tWarning: Could not list role 
assignments for {identity_name}: {str(e)[:100]}") + # Continue to try creating the assignment - # Try to create the role assignment with retries for transient failures - last_error = None + # Try to create with retries for identity propagation delay for attempt in range(max_retries): try: - create_role_assignment( - cmd, - role=role, - assignee=assignee, - scope=scope - ) + create_role_assignment(cmd, role=role, assignee=assignee, scope=scope) print(f"\tRole '{role}' assigned to {identity_name}") return True - except (HttpResponseError, Exception) as e: - error_message = str(e) - last_error = error_message + except Exception as e: + error_str = str(e).lower() - # Check if this is a "already exists" conflict (409) - if "already exists" in error_message.lower() or "conflict" in error_message.lower(): + # Already exists — treat as success + if "conflict" in error_str or "already exists" in error_str: print(f"\tRole '{role}' already assigned to {identity_name}") return True - # Check if this is a permission/authorization error (not retryable) - if "authorization" in error_message.lower() or "forbidden" in error_message.lower() or "permission" in error_message.lower(): + # Principal not found — retryable (identity propagation) + is_propagation_error = "principal" in error_str or "does not exist" in error_str or "cannot find" in error_str + if is_propagation_error and attempt < max_retries - 1: + print(f"\tWaiting for identity to propagate... 
(attempt {attempt + 1}/{max_retries})") + time.sleep(retry_delay) + continue + + # Permission denied — actionable error + if "authorization" in error_str or "forbidden" in error_str: raise InvalidArgumentValueError( - f"Failed to assign '{role}' role to {identity_name}.\n" - f"You don't have sufficient permissions to create role assignments.\n\n" - f"Please ask your administrator to run the following command:\n\n" - f" az role assignment create --role \"{role}\" --assignee \"{assignee}\" --scope \"{scope}\"\n\n" - f"After the role is assigned, re-run this command." + f"Insufficient permissions to assign '{role}' role to {identity_name}.\n" + f"Run manually:\n\n" + f" az role assignment create --role \"{role}\" --assignee \"{assignee}\" --scope \"{scope}\"\n" ) - # Check if this is a "principal not found" error (retryable - identity propagation) - if "cannot find" in error_message.lower() or "does not exist" in error_message.lower() or "principal" in error_message.lower(): - if attempt < max_retries - 1: - print(f"\tWaiting for identity to propagate... (attempt {attempt + 1}/{max_retries})") - time.sleep(retry_delay) - continue - - # For other errors, don't retry + # Non-retryable error — break and raise break - # If we get here, we've exhausted retries or hit a non-retryable error raise InvalidArgumentValueError( f"Failed to assign '{role}' role to {identity_name}.\n" - f"Error: {last_error}\n\n" - f"You can try to manually assign the role using:\n\n" - f" az role assignment create --role \"{role}\" --assignee \"{assignee}\" --scope \"{scope}\"\n\n" - f"After the role is assigned, re-run this command." 
+ f"Run manually:\n\n" + f" az role assignment create --role \"{role}\" --assignee \"{assignee}\" --scope \"{scope}\"\n" ) @@ -218,7 +197,7 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name # Format: /subscriptions/.../resourceGroups/.../providers/Microsoft.DataProtection/backupVaults/{vault}/backupInstances/{bi} vault_name = "Unknown" vault_rg = "Unknown" - if bi_id and '/backupVaults/' in str(bi_id): + if bi_id and '/backupvaults/' in str(bi_id).lower(): bi_parts = parse_resource_id(bi_id) vault_name = bi_parts.get('name', 'Unknown') vault_rg = bi_parts.get('resource_group', 'Unknown') @@ -412,6 +391,7 @@ def _setup_storage_account(cmd, cluster_subscription_id, storage_account_id, blo "kind": "StorageV2", "sku": {"name": "Standard_LRS"}, "allow_blob_public_access": False, + "allow_shared_key_access": False, "tags": sa_tags } backup_storage_account = storage_client.storage_accounts.begin_create( @@ -452,6 +432,85 @@ def _install_backup_extension(cmd, cluster_subscription_id, cluster_resource_gro return backup_extension +def _get_existing_backup_extension(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name): + """ + Check if a backup extension already exists on the cluster. + + Returns: + extension object if found and healthy, None if not found. + Raises on Failed or transient states. 
+ """ + from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration import SourceControlConfigurationClient + k8s_configuration_client = get_mgmt_service_client(cmd.cli_ctx, SourceControlConfigurationClient, subscription_id=cluster_subscription_id) + + try: + extensions = k8s_configuration_client.extensions.list( + cluster_rp="Microsoft.ContainerService", + cluster_resource_name="managedClusters", + resource_group_name=cluster_resource_group_name, + cluster_name=cluster_name) + + for page in extensions.by_page(): + for extension in page: + if extension.extension_type and extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': + provisioning_state = extension.provisioning_state + if provisioning_state == "Succeeded": + return extension + elif provisioning_state == "Failed": + raise InvalidArgumentValueError( + f"Data protection extension '{extension.name}' exists on cluster '{cluster_name}' but is in Failed state.\n" + f"Please take corrective action before running this command again:\n" + f" 1. Check extension logs: az k8s-extension show --name {extension.name} --cluster-name {cluster_name} --resource-group {cluster_resource_group_name} --cluster-type managedClusters\n" + f" 2. Delete the failed extension: az k8s-extension delete --name {extension.name} --cluster-name {cluster_name} --resource-group {cluster_resource_group_name} --cluster-type managedClusters --yes\n" + f" 3. Re-run this command to install a fresh extension.\n" + f"For troubleshooting, visit: https://aka.ms/aksclusterbackup" + ) + else: + raise InvalidArgumentValueError( + f"Data protection extension '{extension.name}' is in '{provisioning_state}' state.\n" + f"Please wait for the operation to complete and try again." 
+ ) + except InvalidArgumentValueError: + raise + except Exception: + pass + + return None + + +def _get_storage_account_from_extension(cmd, extension, cluster_subscription_id): + """ + Extract the storage account details from an existing backup extension's configuration. + + The extension stores config in Velero-style keys: + - configuration.backupStorageLocation.config.storageAccount + - configuration.backupStorageLocation.bucket + - configuration.backupStorageLocation.config.resourceGroup + + Returns: + tuple: (storage_account_object, storage_account_name, container_name, resource_group) + """ + from azure.mgmt.storage import StorageManagementClient + + config = extension.configuration_settings or {} + sa_name = config.get("configuration.backupStorageLocation.config.storageAccount") + container = config.get("configuration.backupStorageLocation.bucket") + sa_rg = config.get("configuration.backupStorageLocation.config.resourceGroup") + + if not sa_name or not sa_rg: + return None, None, None, None + + print(f"\tExtension is configured with storage account: {sa_name} (RG: {sa_rg}, container: {container})") + + storage_client = get_mgmt_service_client(cmd.cli_ctx, StorageManagementClient, subscription_id=cluster_subscription_id) + try: + sa = storage_client.storage_accounts.get_properties(sa_rg, sa_name) + return sa, sa_name, container, sa_rg + except Exception as e: + print(f"\tWarning: Could not fetch storage account '{sa_name}' from extension config: {str(e)[:100]}") + return None, None, None, None + + def _find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location): """ Search for an existing AKS backup vault in the subscription by tag. 
@@ -537,6 +596,13 @@ def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscript assignee=backup_vault["identity"]["principalId"], scope=backup_resource_group.id, identity_name="backup vault identity (on resource group)") + + _check_and_assign_role( + cmd, + role="Disk Snapshot Contributor", + assignee=backup_vault["identity"]["principalId"], + scope=backup_resource_group.id, + identity_name="backup vault identity (snapshot contributor on resource group)") print("\t[OK] Backup vault ready") return backup_vault, backup_vault_name @@ -750,18 +816,56 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy cmd, resource_client, backup_resource_group_id, cluster_location, cluster_name, cluster_resource.identity.principal_id, resource_tags) - # Step 3: Setup storage account - print("\n[3/8] Setting up storage account...") - backup_storage_account, backup_storage_account_name, backup_storage_account_container_name = _setup_storage_account( - cmd, cluster_subscription_id, storage_account_id, blob_container_name, - backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags) - - # Step 4: Install backup extension - print("\n[4/8] Installing backup extension...") - _install_backup_extension( - cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, - backup_storage_account_name, backup_storage_account_container_name, - backup_resource_group_name, backup_storage_account) + # Step 3 & 4: Check extension first, then handle storage account accordingly + # If the extension is already installed, use its configured storage account + # instead of creating/finding a new one (which may be different). 
+ print("\n[3/8] Checking for existing backup extension...") + existing_extension = _get_existing_backup_extension( + cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name) + + if existing_extension: + print(f"\tBackup extension already installed: {existing_extension.name}") + + # Extract the storage account the extension is actually configured with + ext_sa, ext_sa_name, ext_container, ext_sa_rg = _get_storage_account_from_extension( + cmd, existing_extension, cluster_subscription_id) + + if ext_sa: + # Use the extension's configured storage account for all subsequent operations + backup_storage_account = ext_sa + backup_storage_account_name = ext_sa_name + backup_storage_account_container_name = ext_container + print(f"\tUsing extension's storage account: {ext_sa_name}") + else: + # Fallback: extension exists but we can't read its config — setup storage account normally + print("\tWarning: Could not read extension storage config, setting up storage account...") + backup_storage_account, backup_storage_account_name, backup_storage_account_container_name = _setup_storage_account( + cmd, cluster_subscription_id, storage_account_id, blob_container_name, + backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags) + + # Ensure extension identity has correct role on its storage account + _check_and_assign_role( + cmd, + role="Storage Blob Data Contributor", + assignee=existing_extension.aks_assigned_identity.principal_id, + scope=backup_storage_account.id, + identity_name="backup extension identity") + print("\t[OK] Storage account ready") + + print("\n[4/8] Backup extension already installed...") + print("\t[OK] Backup extension ready") + else: + # No extension — setup storage account first, then install extension + print("\tNo existing extension found, setting up storage account...") + backup_storage_account, backup_storage_account_name, backup_storage_account_container_name = _setup_storage_account( + 
cmd, cluster_subscription_id, storage_account_id, blob_container_name, + backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags) + + print("\n[4/8] Installing backup extension...") + _install_backup_extension( + cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, + backup_storage_account_name, backup_storage_account_container_name, + backup_resource_group_name, backup_storage_account) # Step 5: Setup backup vault print("\n[5/8] Setting up backup vault...") @@ -769,6 +873,14 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy cmd, backup_strategy, backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, cluster_resource, backup_resource_group, resource_tags) + # Grant vault identity read access to the backup storage account + _check_and_assign_role( + cmd, + role="Storage Blob Data Reader", + assignee=backup_vault["identity"]["principalId"], + scope=backup_storage_account.id, + identity_name="backup vault identity (on storage account)") + # Step 6: Setup backup policy print("\n[6/8] Setting up backup policy...") backup_policy = _setup_backup_policy( From 89b82860b810b825575e4db4f2bff2322c0b6a95 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 16 Mar 2026 13:50:28 +0530 Subject: [PATCH 14/24] refactor vault decision Signed-off-by: Anshul Ahuja --- .../azext_dataprotection/manual/_consts.py | 2 - .../azext_dataprotection/manual/_help.py | 7 +- .../manual/aks/aks_helper.py | 193 +++++++++++------- 3 files changed, 124 insertions(+), 78 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/_consts.py b/src/dataprotection/azext_dataprotection/manual/_consts.py index 9944212ff33..9f54bdf6ce4 100644 --- a/src/dataprotection/azext_dataprotection/manual/_consts.py +++ b/src/dataprotection/azext_dataprotection/manual/_consts.py @@ -6,7 +6,6 @@ # AKS Backup Strategy Constants CONST_BACKUP_STRATEGY_WEEK = "Week" 
CONST_BACKUP_STRATEGY_MONTH = "Month" -CONST_BACKUP_STRATEGY_IMMUTABLE = "Immutable" CONST_BACKUP_STRATEGY_DISASTER_RECOVERY = "DisasterRecovery" CONST_BACKUP_STRATEGY_CUSTOM = "Custom" @@ -14,7 +13,6 @@ CONST_AKS_BACKUP_STRATEGIES = [ CONST_BACKUP_STRATEGY_WEEK, CONST_BACKUP_STRATEGY_MONTH, - CONST_BACKUP_STRATEGY_IMMUTABLE, CONST_BACKUP_STRATEGY_DISASTER_RECOVERY, CONST_BACKUP_STRATEGY_CUSTOM, ] diff --git a/src/dataprotection/azext_dataprotection/manual/_help.py b/src/dataprotection/azext_dataprotection/manual/_help.py index 021392e2bcc..268f5dcc92c 100644 --- a/src/dataprotection/azext_dataprotection/manual/_help.py +++ b/src/dataprotection/azext_dataprotection/manual/_help.py @@ -317,10 +317,9 @@ - tags: Dictionary of tags to apply to created resources (e.g., {"Owner": "team", "Env": "prod"}) Backup strategy presets (--backup-strategy): - - Week (default): Daily backups with 7-day retention in both Operational and Vault tiers. - - Month: Daily backups with 30-day retention in both Operational and Vault tiers. - - Immutable: Daily backups with 7-day Operational tier + 30-day Vault tier retention. - - DisasterRecovery: Daily backups with 7-day Operational tier + 90-day Vault tier retention for cross-region restore scenarios. + - Week (default): Daily incremental backups with 7-day retention in Operational Store. + - Month: Daily incremental backups with 30-day retention in Operational Store. + - DisasterRecovery: Daily incremental backups with 7-day Operational Store + 90-day Vault Store retention. FirstOfDay backups are copied to Vault Store for cross-region restore. - Custom: Bring your own vault and policy. Requires backupVaultId and backupPolicyId in --backup-configuration-file. 
examples: - name: Enable backup for an AKS cluster with default Week strategy diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index 49700557427..3fbc6d61284 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -538,6 +538,49 @@ def _find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location): return None +def _get_best_vault_storage_type(cmd, subscription_id, location): + """ + Determine the best storage redundancy for a backup vault based on region capabilities. + + Uses the Azure Subscription Locations API to check: + - If region has a paired region → GeoRedundant (GRS) + - If region has availability zones → ZoneRedundant (ZRS) + - Otherwise → LocallyRedundant (LRS) + + Returns: + str: 'GeoRedundant', 'ZoneRedundant', or 'LocallyRedundant' + """ + from azure.mgmt.resource import SubscriptionClient + + try: + sub_client = get_mgmt_service_client(cmd.cli_ctx, SubscriptionClient) + locations = sub_client.subscriptions.list_locations(subscription_id) + + for loc in locations: + if loc.name.lower() == location.lower(): + # Check for paired region → GRS + if hasattr(loc, 'metadata') and loc.metadata: + paired = getattr(loc.metadata, 'paired_region', None) + if paired and len(paired) > 0: + print(f"\tRegion {location} has paired region → GeoRedundant") + return 'GeoRedundant' + + # Check for availability zones → ZRS + if hasattr(loc, 'availability_zone_mappings') and loc.availability_zone_mappings: + if len(loc.availability_zone_mappings) > 0: + print(f"\tRegion {location} has availability zones → ZoneRedundant") + return 'ZoneRedundant' + + # No pair, no zones → LRS + print(f"\tRegion {location} has no paired region or zones → LocallyRedundant") + return 'LocallyRedundant' + except Exception as e: + print(f"\tWarning: Could not determine region capabilities: 
{str(e)[:100]}") + + # Default fallback + return 'LocallyRedundant' + + def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, cluster_resource, backup_resource_group, resource_tags): """Create or use backup vault.""" from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Create as _BackupVaultCreate @@ -572,14 +615,27 @@ def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscript if resource_tags: vault_tags.update(resource_tags) + # Determine best storage type based on region capabilities + storage_type = _get_best_vault_storage_type(cmd, cluster_subscription_id, cluster_location) + print(f"\tStorage type: {storage_type}") + backup_vault_args = { "vault_name": backup_vault_name, "resource_group": backup_resource_group_name, "location": cluster_location, "type": "SystemAssigned", - "storage_setting": [{'type': 'LocallyRedundant', 'datastore-type': 'VaultStore'}], + "storage_setting": [{'type': storage_type, 'datastore-type': 'VaultStore'}], + "soft_delete_state": "On", + "soft_delete_retention": 14, + "immutability_state": "Unlocked", + "cross_subscription_restore_state": "Enabled", "tags": vault_tags } + + # Enable CRR only for GRS vaults (requires paired region) + if storage_type == 'GeoRedundant': + backup_vault_args["cross_region_restore_state"] = "Enabled" + backup_vault = _BackupVaultCreate(cli_ctx=cmd.cli_ctx)(command_args=backup_vault_args).result() print(f"\tBackup Vault: {backup_vault['id']}") @@ -915,36 +971,32 @@ def _get_policy_config_for_strategy(backup_strategy): """Get backup policy configuration based on strategy. 
Strategies: - - Week: 7 days operational tier, 7 days vault tier - - Month: 30 days operational tier, 30 days vault tier - - Immutable: 7 days operational tier, 30 days vault tier (with immutable retention) - - DisasterRecovery: 7 days operational tier, 90 days vault tier (for cross-region restore) + - Week: 7 days operational tier only, daily incremental + - Month: 30 days operational tier only, daily incremental + - DisasterRecovery: 7 days operational tier + 90 days vault tier, daily incremental with FirstOfDay vault copy """ - # Operational tier retention based on strategy - op_tier_retention = "P7D" # Week default - vault_tier_retention = "P7D" # Week default - if backup_strategy == 'Week': - op_tier_retention = "P7D" - vault_tier_retention = "P7D" + op_retention = "P7D" + vault_retention = None elif backup_strategy == 'Month': - op_tier_retention = "P30D" - vault_tier_retention = "P30D" - elif backup_strategy == 'Immutable': - op_tier_retention = "P7D" - vault_tier_retention = "P30D" # Longer vault retention for immutable + op_retention = "P30D" + vault_retention = None elif backup_strategy == 'DisasterRecovery': - op_tier_retention = "P7D" - vault_tier_retention = "P90D" # 90 days for DR scenarios + op_retention = "P7D" + vault_retention = "P90D" + else: + raise InvalidArgumentValueError( + f"Unknown backup strategy '{backup_strategy}'. Supported strategies: Week, Month, DisasterRecovery, Custom." 
+ ) + # Operational Store retention rule (all strategies) policy_rules = [ - # Operational Store Default Retention Rule { "isDefault": True, "lifecycles": [ { "deleteAfter": { - "duration": op_tier_retention, + "duration": op_retention, "objectType": "AbsoluteDeleteOption" }, "sourceDataStore": { @@ -956,14 +1008,17 @@ def _get_policy_config_for_strategy(backup_strategy): ], "name": "Default", "objectType": "AzureRetentionRule" - }, - # Vault Store Retention Rule - { + } + ] + + # Vault Store retention rule (only when vault_retention is set) + if vault_retention: + policy_rules.append({ "isDefault": False, "lifecycles": [ { "deleteAfter": { - "duration": vault_tier_retention, + "duration": vault_retention, "objectType": "AbsoluteDeleteOption" }, "sourceDataStore": { @@ -975,60 +1030,54 @@ def _get_policy_config_for_strategy(backup_strategy): ], "name": "Vault", "objectType": "AzureRetentionRule" - }, - # Backup Rule - Daily backup to Operational Store + }) + + # Tagging criteria — Default for all, Vault (FirstOfDay) when vault tier is enabled + tagging_criteria = [ { - "backupParameters": { - "backupType": "Incremental", - "objectType": "AzureBackupParams" - }, - "dataStore": { - "dataStoreType": "OperationalStore", - "objectType": "DataStoreInfoBase" - }, - "name": "BackupDaily", - "objectType": "AzureBackupRule", - "trigger": { - "objectType": "ScheduleBasedTriggerContext", - "schedule": { - "repeatingTimeIntervals": [ - "R/2024-01-01T00:00:00+00:00/P1D" - ], - "timeZone": "Coordinated Universal Time" - }, - "taggingCriteria": [ - { - "isDefault": True, - "tagInfo": { - "id": "Default_", - "tagName": "Default" - }, - "taggingPriority": 99 - }, - { - "isDefault": False, - "tagInfo": { - "id": "Vault_", - "tagName": "Vault" - }, - "taggingPriority": 50, - "criteria": [ - { - "objectType": "ScheduleBasedBackupCriteria", - "absoluteCriteria": ["FirstOfDay"] - } - ] - } - ] - } + "isDefault": True, + "tagInfo": {"id": "Default_", "tagName": "Default"}, + 
"taggingPriority": 99 } ] + if vault_retention: + tagging_criteria.append({ + "isDefault": False, + "tagInfo": {"id": "Vault_", "tagName": "Vault"}, + "taggingPriority": 50, + "criteria": [ + { + "objectType": "ScheduleBasedBackupCriteria", + "absoluteCriteria": ["FirstOfDay"] + } + ] + }) + + # Backup rule — daily incremental + policy_rules.append({ + "backupParameters": { + "backupType": "Incremental", + "objectType": "AzureBackupParams" + }, + "dataStore": { + "dataStoreType": "OperationalStore", + "objectType": "DataStoreInfoBase" + }, + "name": "BackupDaily", + "objectType": "AzureBackupRule", + "trigger": { + "objectType": "ScheduleBasedTriggerContext", + "schedule": { + "repeatingTimeIntervals": ["R/2024-01-01T00:00:00+00:00/P1D"], + "timeZone": "Coordinated Universal Time" + }, + "taggingCriteria": tagging_criteria + } + }) return { "objectType": "BackupPolicy", - "datasourceTypes": [ - "Microsoft.ContainerService/managedClusters" - ], + "datasourceTypes": ["Microsoft.ContainerService/managedClusters"], "policyRules": policy_rules } From bdc8198d8ad98d4112162391a401a414a99702df Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 16 Mar 2026 16:28:16 +0530 Subject: [PATCH 15/24] GRS changes Signed-off-by: Anshul Ahuja --- .../manual/aks/__init__.py | 4 + .../manual/aks/aks_helper.py | 98 ++++++++----------- 2 files changed, 47 insertions(+), 55 deletions(-) create mode 100644 src/dataprotection/azext_dataprotection/manual/aks/__init__.py diff --git a/src/dataprotection/azext_dataprotection/manual/aks/__init__.py b/src/dataprotection/azext_dataprotection/manual/aks/__init__.py new file mode 100644 index 00000000000..34913fb394d --- /dev/null +++ b/src/dataprotection/azext_dataprotection/manual/aks/__init__.py @@ -0,0 +1,4 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index 3fbc6d61284..69d3651b626 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -538,47 +538,36 @@ def _find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location): return None -def _get_best_vault_storage_type(cmd, subscription_id, location): +def _try_create_vault_with_storage_type(cmd, vault_create_cls, backup_vault_name, backup_resource_group_name, cluster_location, vault_tags, storage_type): """ - Determine the best storage redundancy for a backup vault based on region capabilities. - - Uses the Azure Subscription Locations API to check: - - If region has a paired region → GeoRedundant (GRS) - - If region has availability zones → ZoneRedundant (ZRS) - - Otherwise → LocallyRedundant (LRS) + Attempt to create a backup vault with the given storage type. 
Returns: - str: 'GeoRedundant', 'ZoneRedundant', or 'LocallyRedundant' + backup_vault dict on success, None on failure """ - from azure.mgmt.resource import SubscriptionClient + backup_vault_args = { + "vault_name": backup_vault_name, + "resource_group": backup_resource_group_name, + "location": cluster_location, + "type": "SystemAssigned", + "storage_setting": [{'type': storage_type, 'datastore-type': 'VaultStore'}], + "soft_delete_state": "On", + "retention_duration_in_days": 14.0, + "immutability_state": "Unlocked", + "cross_subscription_restore_state": "Enabled", + "tags": vault_tags + } + + # Enable CRR only for GRS vaults (requires paired region) + if storage_type == 'GeoRedundant': + backup_vault_args["cross_region_restore_state"] = "Enabled" try: - sub_client = get_mgmt_service_client(cmd.cli_ctx, SubscriptionClient) - locations = sub_client.subscriptions.list_locations(subscription_id) - - for loc in locations: - if loc.name.lower() == location.lower(): - # Check for paired region → GRS - if hasattr(loc, 'metadata') and loc.metadata: - paired = getattr(loc.metadata, 'paired_region', None) - if paired and len(paired) > 0: - print(f"\tRegion {location} has paired region → GeoRedundant") - return 'GeoRedundant' - - # Check for availability zones → ZRS - if hasattr(loc, 'availability_zone_mappings') and loc.availability_zone_mappings: - if len(loc.availability_zone_mappings) > 0: - print(f"\tRegion {location} has availability zones → ZoneRedundant") - return 'ZoneRedundant' - - # No pair, no zones → LRS - print(f"\tRegion {location} has no paired region or zones → LocallyRedundant") - return 'LocallyRedundant' + backup_vault = vault_create_cls(cli_ctx=cmd.cli_ctx)(command_args=backup_vault_args).result() + return backup_vault except Exception as e: - print(f"\tWarning: Could not determine region capabilities: {str(e)[:100]}") - - # Default fallback - return 'LocallyRedundant' + print(f"\tVault creation with {storage_type} failed: {str(e)[:120]}") + return None 
def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, cluster_resource, backup_resource_group, resource_tags): @@ -615,28 +604,27 @@ def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscript if resource_tags: vault_tags.update(resource_tags) - # Determine best storage type based on region capabilities - storage_type = _get_best_vault_storage_type(cmd, cluster_subscription_id, cluster_location) - print(f"\tStorage type: {storage_type}") - - backup_vault_args = { - "vault_name": backup_vault_name, - "resource_group": backup_resource_group_name, - "location": cluster_location, - "type": "SystemAssigned", - "storage_setting": [{'type': storage_type, 'datastore-type': 'VaultStore'}], - "soft_delete_state": "On", - "soft_delete_retention": 14, - "immutability_state": "Unlocked", - "cross_subscription_restore_state": "Enabled", - "tags": vault_tags - } - - # Enable CRR only for GRS vaults (requires paired region) - if storage_type == 'GeoRedundant': - backup_vault_args["cross_region_restore_state"] = "Enabled" + # Try storage types in order of preference: GRS → ZRS → LRS + # Not all regions support all types, so we fall back gracefully. 
+ backup_vault = None + storage_type = None + + for try_type in ['GeoRedundant', 'ZoneRedundant', 'LocallyRedundant']: + print(f"\tTrying storage type: {try_type}...") + backup_vault = _try_create_vault_with_storage_type( + cmd, _BackupVaultCreate, backup_vault_name, backup_resource_group_name, + cluster_location, vault_tags, try_type) + if backup_vault: + storage_type = try_type + print(f"\tVault created with storage type: {storage_type}") + break - backup_vault = _BackupVaultCreate(cli_ctx=cmd.cli_ctx)(command_args=backup_vault_args).result() + if not backup_vault: + raise InvalidArgumentValueError( + f"Failed to create backup vault '{backup_vault_name}' in region '{cluster_location}' " + f"with any storage type (GeoRedundant, ZoneRedundant, LocallyRedundant).\n" + f"Please check region availability and try again." + ) print(f"\tBackup Vault: {backup_vault['id']}") _check_and_assign_role( From cc9a292e7d805197a0650dbadac19f77c71e5c0d Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Wed, 18 Mar 2026 13:00:37 +0530 Subject: [PATCH 16/24] Bugbash and UTs Signed-off-by: Anshul Ahuja --- .../manual/aks/aks_helper.py | 134 +++++- .../test_dataprotection_enable_backup.py | 383 ++++++++++++++++++ 2 files changed, 502 insertions(+), 15 deletions(-) create mode 100644 src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_enable_backup.py diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index 69d3651b626..7e9a41d7c94 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -247,14 +247,66 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name return None +def _get_cluster_msi_principal_id(cluster_resource, cluster_name): + """ + Extract the managed identity principal ID from an AKS cluster resource. 
+ + Supports both: + - System-Assigned Managed Identity (SAMI): identity.principal_id + - User-Assigned Managed Identity (UAMI): identity.user_assigned_identities[*].principal_id + + Returns: + str: principal ID of the cluster's managed identity + Raises: + InvalidArgumentValueError if no managed identity is found + """ + identity = cluster_resource.identity + if not identity: + raise InvalidArgumentValueError( + f"Cluster '{cluster_name}' does not have a managed identity configured.\n" + f"AKS backup requires a cluster with managed identity enabled." + ) + + identity_type = getattr(identity, 'type', '') or '' + + # System-assigned identity + if identity.principal_id: + print(f"\tIdentity type: {identity_type} (system-assigned)") + return identity.principal_id + + # User-assigned identity — get the first UAMI's principal ID + user_assigned = getattr(identity, 'user_assigned_identities', None) + if user_assigned: + # user_assigned_identities is a dict: {resource_id: {principal_id, client_id}} + if isinstance(user_assigned, dict): + for uami_id, uami_info in user_assigned.items(): + principal_id = None + if isinstance(uami_info, dict): + principal_id = uami_info.get('principal_id') or uami_info.get('principalId') + else: + principal_id = getattr(uami_info, 'principal_id', None) or getattr(uami_info, 'principalId', None) + + if principal_id: + uami_name = uami_id.split('/')[-1] if '/' in uami_id else uami_id + print(f"\tIdentity type: {identity_type} (user-assigned: {uami_name})") + return principal_id + + raise InvalidArgumentValueError( + f"Could not extract managed identity principal ID from cluster '{cluster_name}'.\n" + f"Identity type: {identity_type}\n" + f"AKS backup requires a cluster with a system-assigned or user-assigned managed identity." 
+ ) + + def _validate_cluster(resource_client, datasource_id, cluster_name): """Validate the AKS cluster exists and get its details.""" cluster_resource = resource_client.resources.get_by_id(datasource_id, api_version="2024-08-01") cluster_location = cluster_resource.location print(f"\tCluster: {cluster_name}") print(f"\tLocation: {cluster_location}") + cluster_identity_principal_id = _get_cluster_msi_principal_id(cluster_resource, cluster_name) print("\t[OK] Cluster validated") - return cluster_resource, cluster_location + return cluster_resource, cluster_location, cluster_identity_principal_id def _find_existing_backup_resource_group(resource_client, cluster_location): @@ -523,8 +575,10 @@ def _find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location): from azext_dataprotection.aaz.latest.dataprotection.backup_vault import List as _BackupVaultList try: - # List all backup vaults in the subscription - vaults = _BackupVaultList(cli_ctx=cmd.cli_ctx)(command_args={}) + # List all backup vaults in the cluster's subscription + vaults = _BackupVaultList(cli_ctx=cmd.cli_ctx)(command_args={ + "subscription": cluster_subscription_id + }) for vault in vaults: if vault.get('tags'): @@ -538,7 +592,7 @@ def _find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location): return None -def _try_create_vault_with_storage_type(cmd, vault_create_cls, backup_vault_name, backup_resource_group_name, cluster_location, vault_tags, storage_type): +def _try_create_vault_with_storage_type(cmd, vault_create_cls, backup_vault_name, backup_resource_group_name, cluster_location, vault_tags, storage_type, cluster_subscription_id=None): """ Attempt to create a backup vault with the given storage type. 
@@ -558,6 +612,9 @@ def _try_create_vault_with_storage_type(cmd, vault_create_cls, backup_vault_name "tags": vault_tags } + if cluster_subscription_id: + backup_vault_args["subscription"] = cluster_subscription_id + # Enable CRR only for GRS vaults (requires paired region) if storage_type == 'GeoRedundant': backup_vault_args["cross_region_restore_state"] = "Enabled" @@ -583,7 +640,8 @@ def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscript from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Show as _BackupVaultShow backup_vault = _BackupVaultShow(cli_ctx=cmd.cli_ctx)(command_args={ "vault_name": backup_vault_name, - "resource_group": vault_rg + "resource_group": vault_rg, + "subscription": cluster_subscription_id }) else: # Search for existing backup vault with matching tag @@ -613,7 +671,7 @@ def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscript print(f"\tTrying storage type: {try_type}...") backup_vault = _try_create_vault_with_storage_type( cmd, _BackupVaultCreate, backup_vault_name, backup_resource_group_name, - cluster_location, vault_tags, try_type) + cluster_location, vault_tags, try_type, cluster_subscription_id) if backup_vault: storage_type = try_type print(f"\tVault created with storage type: {storage_type}") @@ -652,7 +710,7 @@ def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscript return backup_vault, backup_vault_name -def _setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy_id): +def _setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy_id, cluster_subscription_id): """Create or use backup policy.""" from azext_dataprotection.manual.aaz_operations.backup_policy import Create as _BackupPolicyCreate from azext_dataprotection.aaz.latest.dataprotection.backup_policy import List as 
_BackupPolicyList @@ -675,7 +733,8 @@ def _setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_g try: policies = _BackupPolicyList(cli_ctx=cmd.cli_ctx)(command_args={ "resource_group": vault_rg_for_policy, - "vault_name": backup_vault_name + "vault_name": backup_vault_name, + "subscription": cluster_subscription_id }) for policy in policies: if policy.get('name') == backup_policy_name: @@ -696,7 +755,8 @@ def _setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_g "backup_policy_name": backup_policy_name, "resource_group": vault_rg_for_policy, "vault_name": backup_vault_name, - "policy": policy_config + "policy": policy_config, + "subscription": cluster_subscription_id }) print(f"\tBackup Policy: {backup_policy.get('id', backup_policy_id if backup_policy_id else 'N/A')}") @@ -751,7 +811,7 @@ def _setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_n print("\t[OK] Trusted access configured - vault can now access cluster for backup operations") -def _create_backup_instance(cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group): +def _create_backup_instance(cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group, cluster_subscription_id): """Create backup instance.""" from azext_dataprotection.manual.aaz_operations.backup_instance import ValidateAndCreate as _BackupInstanceValidateAndCreate import uuid @@ -780,7 +840,8 @@ def _create_backup_instance(cmd, cluster_name, cluster_resource_group_name, data "backup_instance_name": backup_instance_name, "resource_group": vault_rg_for_bi, "vault_name": backup_vault_name, - "backup_instance": backup_instance_payload + "backup_instance": 
backup_instance_payload, + "subscription": cluster_subscription_id }).result() # Check and report the protection state @@ -843,6 +904,40 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy if resource_tags: print(f"Resource Tags: {json.dumps(resource_tags)}") + # Show execution plan and get user confirmation + print("\nThis command will perform the following steps:") + print(" [1] Validate the AKS cluster") + print(" [2] Create or reuse a backup resource group (AKSAzureBackup_)") + print(" [3] Create or reuse a storage account for backup data") + print(" [4] Install the data protection extension on the cluster") + print(" [5] Create or reuse a backup vault") + print(" [6] Create or reuse a backup policy") + print(" [7] Configure trusted access between vault and cluster") + print(" [8] Create a backup instance to start protection") + print("") + print("The following RBAC role assignments will be created:") + print(" - Cluster MSI → Contributor on Backup Resource Group") + print(" - Extension MSI → Storage Blob Data Contributor on Storage Account") + print(" - Vault MSI → Reader on AKS Cluster") + print(" - Vault MSI → Reader on Backup Resource Group") + print(" - Vault MSI → Disk Snapshot Contributor on Backup Resource Group") + print(" - Vault MSI → Storage Blob Data Reader on Storage Account") + print("") + print(f" Subscription: {cluster_subscription_id}") + print(f" Cluster: {cluster_name}") + print(f" Region: (will be determined from cluster)") + print(f" Strategy: {backup_strategy}") + print("") + print("NOTE: This command requires elevated privileges (Owner or") + print(" User Access Administrator) on the subscription to create") + print(" RBAC role assignments listed above.") + print("") + + from knack.prompting import prompt_y_n + if not prompt_y_n("Do you want to proceed?", default='y'): + print("Operation cancelled by user.") + return + from azure.mgmt.resource import ResourceManagementClient resource_client = 
get_mgmt_service_client(cmd.cli_ctx, ResourceManagementClient, subscription_id=cluster_subscription_id) @@ -852,13 +947,13 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy # Step 1: Validate cluster print("\n[1/8] Validating cluster...") - cluster_resource, cluster_location = _validate_cluster(resource_client, datasource_id, cluster_name) + cluster_resource, cluster_location, cluster_identity_principal_id = _validate_cluster(resource_client, datasource_id, cluster_name) # Step 2: Setup resource group print("\n[2/8] Setting up backup resource group...") backup_resource_group, backup_resource_group_name = _setup_resource_group( cmd, resource_client, backup_resource_group_id, cluster_location, cluster_name, - cluster_resource.identity.principal_id, resource_tags) + cluster_identity_principal_id, resource_tags) # Step 3 & 4: Check extension first, then handle storage account accordingly # If the extension is already installed, use its configured storage account @@ -929,18 +1024,27 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy print("\n[6/8] Setting up backup policy...") backup_policy = _setup_backup_policy( cmd, backup_vault, backup_vault_name, backup_resource_group_name, - backup_strategy, backup_vault_id, backup_policy_id) + backup_strategy, backup_vault_id, backup_policy_id, cluster_subscription_id) # Step 7: Setup trusted access print("\n[7/8] Setting up trusted access...") _setup_trusted_access( cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_vault) + # Wait for role assignment propagation before creating backup instance + import time + wait_seconds = 120 + print(f"\n\tWaiting {wait_seconds} seconds for permission propagation across Azure AD...") + for remaining in range(wait_seconds, 0, -10): + print(f"\t {remaining} seconds remaining...", end='\r') + time.sleep(min(10, remaining)) + print(f"\t Permission propagation wait complete. 
") + # Step 8: Create backup instance print("\n[8/8] Configuring backup instance...") backup_instance, policy_id_for_bi = _create_backup_instance( cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, - backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group) + backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group, cluster_subscription_id) # Print summary print("\n" + "=" * 60) diff --git a/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_enable_backup.py b/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_enable_backup.py new file mode 100644 index 00000000000..c18990bd342 --- /dev/null +++ b/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_enable_backup.py @@ -0,0 +1,383 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=line-too-long + +"""Unit tests for azext_dataprotection.manual.aks.aks_helper functions.""" + +import unittest +from unittest.mock import MagicMock, patch, PropertyMock +from azure.cli.core.azclierror import InvalidArgumentValueError + +# Module under test +from azext_dataprotection.manual.aks.aks_helper import ( + _validate_request, + _get_cluster_msi_principal_id, + _get_policy_config_for_strategy, + _get_backup_instance_payload, + _generate_backup_resource_group_name, + _generate_backup_storage_account_name, + _generate_backup_storage_account_container_name, + _generate_backup_vault_name, + _generate_backup_policy_name, + _generate_trusted_access_role_binding_name, + _generate_arm_id, + _check_and_assign_role, + _find_existing_backup_resource_group, + _find_existing_backup_storage_account, + _check_existing_backup_instance, + AKS_BACKUP_TAG_KEY, +) + +# Shared test constants +SUB_ID = "00000000-0000-0000-0000-000000000001" +CLUSTER_RG = "my-rg" +CLUSTER_NAME = "my-aks" +CLUSTER_ID = ( + f"/subscriptions/{SUB_ID}/resourceGroups/{CLUSTER_RG}" + f"/providers/Microsoft.ContainerService/managedClusters/{CLUSTER_NAME}" +) +LOCATION = "eastus" + + +# --------------------------------------------------------------------------- +# _validate_request +# --------------------------------------------------------------------------- +class TestValidateRequest(unittest.TestCase): + """Tests for _validate_request parameter validation.""" + + def test_valid_week_strategy_no_config(self): + """Basic Week strategy with empty config should pass.""" + _validate_request(CLUSTER_ID, "Week", {}) + + def test_custom_strategy_missing_vault_id(self): + """Custom strategy without backupVaultId should raise.""" + with self.assertRaises(InvalidArgumentValueError): + _validate_request(CLUSTER_ID, "Custom", {"backupPolicyId": "/sub/rg/pol"}) + + def test_custom_strategy_missing_policy_id(self): + 
"""Custom strategy without backupPolicyId should raise.""" + vault_id = f"/subscriptions/{SUB_ID}/resourceGroups/rg/providers/Microsoft.DataProtection/backupVaults/v" + with self.assertRaises(InvalidArgumentValueError): + _validate_request(CLUSTER_ID, "Custom", {"backupVaultId": vault_id}) + + def test_cross_subscription_resource_group_rejected(self): + """backupResourceGroupId in a different subscription should raise.""" + other_sub = "99999999-9999-9999-9999-999999999999" + rg_id = f"/subscriptions/{other_sub}/resourceGroups/other-rg" + with self.assertRaises(InvalidArgumentValueError): + _validate_request(CLUSTER_ID, "Week", {"backupResourceGroupId": rg_id}) + + def test_cross_subscription_storage_account_rejected(self): + """storageAccountResourceId in a different subscription should raise.""" + other_sub = "99999999-9999-9999-9999-999999999999" + sa_id = ( + f"/subscriptions/{other_sub}/resourceGroups/rg" + f"/providers/Microsoft.Storage/storageAccounts/sa1" + ) + with self.assertRaises(InvalidArgumentValueError): + _validate_request(CLUSTER_ID, "Week", {"storageAccountResourceId": sa_id}) + + def test_same_subscription_resources_accepted(self): + """Resources in the same subscription should pass validation.""" + rg_id = f"/subscriptions/{SUB_ID}/resourceGroups/backup-rg" + sa_id = f"/subscriptions/{SUB_ID}/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/sa" + vault_id = f"/subscriptions/{SUB_ID}/resourceGroups/rg/providers/Microsoft.DataProtection/backupVaults/v" + _validate_request(CLUSTER_ID, "Custom", { + "backupResourceGroupId": rg_id, + "storageAccountResourceId": sa_id, + "backupVaultId": vault_id, + "backupPolicyId": vault_id + "/backupPolicies/pol", + }) + + def test_invalid_json_string_raises(self): + """String config that is invalid JSON should raise.""" + with self.assertRaises(InvalidArgumentValueError): + _validate_request(CLUSTER_ID, "Week", "not-valid-json") + + def test_none_config_treated_as_empty(self): + """None 
configuration_params should be treated as empty dict.""" + _validate_request(CLUSTER_ID, "Week", None) + + +# --------------------------------------------------------------------------- +# _get_cluster_msi_principal_id +# --------------------------------------------------------------------------- +class TestGetClusterMsiPrincipalId(unittest.TestCase): + """Tests for _get_cluster_msi_principal_id identity extraction.""" + + def _make_cluster(self, principal_id=None, identity_type="SystemAssigned", user_assigned=None): + cluster = MagicMock() + cluster.identity.principal_id = principal_id + cluster.identity.type = identity_type + cluster.identity.user_assigned_identities = user_assigned + return cluster + + def test_system_assigned_identity(self): + cluster = self._make_cluster(principal_id="sami-pid-123") + result = _get_cluster_msi_principal_id(cluster, "aks1") + self.assertEqual(result, "sami-pid-123") + + def test_user_assigned_identity_dict(self): + uami = {"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1": {"principal_id": "uami-pid-456"}} + cluster = self._make_cluster(principal_id=None, identity_type="UserAssigned", user_assigned=uami) + result = _get_cluster_msi_principal_id(cluster, "aks1") + self.assertEqual(result, "uami-pid-456") + + def test_no_identity_raises(self): + cluster = MagicMock() + cluster.identity = None + with self.assertRaises(InvalidArgumentValueError): + _get_cluster_msi_principal_id(cluster, "aks-no-id") + + def test_identity_without_principal_raises(self): + cluster = self._make_cluster(principal_id=None, user_assigned=None) + with self.assertRaises(InvalidArgumentValueError): + _get_cluster_msi_principal_id(cluster, "aks-no-pid") + + +# --------------------------------------------------------------------------- +# _get_policy_config_for_strategy +# --------------------------------------------------------------------------- +class TestGetPolicyConfigForStrategy(unittest.TestCase): + 
"""Tests for _get_policy_config_for_strategy policy generation.""" + + def test_week_strategy_retention(self): + policy = _get_policy_config_for_strategy("Week") + retention_rules = [r for r in policy["policyRules"] if r["objectType"] == "AzureRetentionRule"] + self.assertEqual(len(retention_rules), 1) + self.assertEqual(retention_rules[0]["lifecycles"][0]["deleteAfter"]["duration"], "P7D") + + def test_month_strategy_retention(self): + policy = _get_policy_config_for_strategy("Month") + retention_rules = [r for r in policy["policyRules"] if r["objectType"] == "AzureRetentionRule"] + self.assertEqual(len(retention_rules), 1) + self.assertEqual(retention_rules[0]["lifecycles"][0]["deleteAfter"]["duration"], "P30D") + + def test_disaster_recovery_has_vault_tier(self): + policy = _get_policy_config_for_strategy("DisasterRecovery") + retention_rules = [r for r in policy["policyRules"] if r["objectType"] == "AzureRetentionRule"] + self.assertEqual(len(retention_rules), 2) + vault_rule = [r for r in retention_rules if r["name"] == "Vault"] + self.assertEqual(len(vault_rule), 1) + self.assertEqual(vault_rule[0]["lifecycles"][0]["deleteAfter"]["duration"], "P90D") + + def test_unknown_strategy_raises(self): + with self.assertRaises(InvalidArgumentValueError): + _get_policy_config_for_strategy("InvalidStrategy") + + def test_policy_has_backup_rule(self): + policy = _get_policy_config_for_strategy("Week") + backup_rules = [r for r in policy["policyRules"] if r["objectType"] == "AzureBackupRule"] + self.assertEqual(len(backup_rules), 1) + self.assertEqual(backup_rules[0]["backupParameters"]["backupType"], "Incremental") + + +# --------------------------------------------------------------------------- +# _get_backup_instance_payload +# --------------------------------------------------------------------------- +class TestGetBackupInstancePayload(unittest.TestCase): + """Tests for _get_backup_instance_payload structure.""" + + def test_payload_structure(self): + policy_id = 
"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.DataProtection/backupVaults/v/backupPolicies/p" + rg_id = f"/subscriptions/{SUB_ID}/resourceGroups/backup-rg" + payload = _get_backup_instance_payload("bi-name", CLUSTER_NAME, CLUSTER_ID, LOCATION, policy_id, rg_id) + + props = payload["properties"] + self.assertEqual(props["object_type"], "BackupInstance") + self.assertEqual(props["data_source_info"]["resource_id"], CLUSTER_ID) + self.assertEqual(props["data_source_info"]["resource_location"], LOCATION) + self.assertEqual(props["policy_info"]["policy_id"], policy_id) + ds_params = props["policy_info"]["policy_parameters"]["backup_datasource_parameters_list"][0] + self.assertTrue(ds_params["include_cluster_scope_resources"]) + self.assertTrue(ds_params["snapshot_volumes"]) + + +# --------------------------------------------------------------------------- +# Name generators +# --------------------------------------------------------------------------- +class TestNameGenerators(unittest.TestCase): + """Tests for _generate_* naming functions.""" + + def test_resource_group_name(self): + self.assertEqual(_generate_backup_resource_group_name("eastus"), "AKSAzureBackup_eastus") + + def test_storage_account_name_constraints(self): + name = _generate_backup_storage_account_name("East US 2") + self.assertTrue(name.islower() or name.isdigit()) + self.assertLessEqual(len(name), 24) + self.assertGreaterEqual(len(name), 3) + self.assertTrue(name.startswith("aksbkp")) + + def test_storage_account_name_uniqueness(self): + """Two calls should produce different names (GUID suffix).""" + a = _generate_backup_storage_account_name("eastus") + b = _generate_backup_storage_account_name("eastus") + self.assertNotEqual(a, b) + + def test_container_name_sanitisation(self): + name = _generate_backup_storage_account_container_name("My_AKS-Cluster!", "MY-RG") + self.assertTrue(all(c.isalnum() or c == '-' for c in name)) + self.assertLessEqual(len(name), 63) + + def 
test_vault_name(self): + self.assertEqual(_generate_backup_vault_name("westus2"), "AKSAzureBackup-westus2") + + def test_policy_name(self): + self.assertEqual(_generate_backup_policy_name("Week"), "AKSBackupPolicy-Week") + + def test_trusted_access_binding_name_length(self): + name = _generate_trusted_access_role_binding_name() + self.assertTrue(name.startswith("tarb-")) + self.assertLessEqual(len(name), 24) + + def test_arm_id_format(self): + arm_id = _generate_arm_id(SUB_ID, "rg1", "Microsoft.Compute/virtualMachines", "vm1") + self.assertIn(SUB_ID, arm_id) + self.assertIn("rg1", arm_id) + self.assertIn("vm1", arm_id) + + +# --------------------------------------------------------------------------- +# _check_and_assign_role +# --------------------------------------------------------------------------- +class TestCheckAndAssignRole(unittest.TestCase): + """Tests for _check_and_assign_role with mocked role assignment APIs.""" + + ROLE_MODULE = "azure.cli.command_modules.role.custom" + + @patch(f"{ROLE_MODULE}.create_role_assignment") + @patch(f"{ROLE_MODULE}.list_role_assignments", return_value=[{"id": "existing"}]) + def test_existing_role_returns_true(self, mock_list, mock_create): + cmd = MagicMock() + result = _check_and_assign_role(cmd, "Reader", "pid", "/scope") + self.assertTrue(result) + mock_create.assert_not_called() + + @patch(f"{ROLE_MODULE}.create_role_assignment") + @patch(f"{ROLE_MODULE}.list_role_assignments", return_value=[]) + def test_creates_role_when_missing(self, mock_list, mock_create): + cmd = MagicMock() + result = _check_and_assign_role(cmd, "Reader", "pid", "/scope") + self.assertTrue(result) + mock_create.assert_called_once() + + @patch(f"{ROLE_MODULE}.create_role_assignment", side_effect=Exception("Conflict: already exists")) + @patch(f"{ROLE_MODULE}.list_role_assignments", return_value=[]) + def test_conflict_treated_as_success(self, mock_list, mock_create): + cmd = MagicMock() + result = _check_and_assign_role(cmd, "Reader", "pid", 
"/scope") + self.assertTrue(result) + + @patch(f"{ROLE_MODULE}.create_role_assignment", side_effect=Exception("Authorization failed: forbidden")) + @patch(f"{ROLE_MODULE}.list_role_assignments", return_value=[]) + def test_permission_denied_raises(self, mock_list, mock_create): + cmd = MagicMock() + with self.assertRaises(InvalidArgumentValueError): + _check_and_assign_role(cmd, "Reader", "pid", "/scope") + + +# --------------------------------------------------------------------------- +# _find_existing_backup_resource_group +# --------------------------------------------------------------------------- +class TestFindExistingBackupResourceGroup(unittest.TestCase): + """Tests for tag-based resource group discovery.""" + + def _make_rg(self, name, tags=None): + rg = MagicMock() + rg.name = name + rg.tags = tags + return rg + + def test_finds_matching_rg(self): + rg = self._make_rg("AKSAzureBackup_eastus", {AKS_BACKUP_TAG_KEY: "eastus"}) + client = MagicMock() + client.resource_groups.list.return_value = [rg] + result = _find_existing_backup_resource_group(client, "eastus") + self.assertEqual(result.name, "AKSAzureBackup_eastus") + + def test_returns_none_when_no_match(self): + rg = self._make_rg("other-rg", {"env": "prod"}) + client = MagicMock() + client.resource_groups.list.return_value = [rg] + result = _find_existing_backup_resource_group(client, "eastus") + self.assertIsNone(result) + + def test_returns_none_on_exception(self): + client = MagicMock() + client.resource_groups.list.side_effect = Exception("API error") + result = _find_existing_backup_resource_group(client, "eastus") + self.assertIsNone(result) + + +# --------------------------------------------------------------------------- +# _find_existing_backup_storage_account +# --------------------------------------------------------------------------- +class TestFindExistingBackupStorageAccount(unittest.TestCase): + """Tests for tag-based storage account discovery.""" + + def _make_sa(self, name, 
location_tag, sa_id=None): + sa = MagicMock() + sa.name = name + sa.tags = {AKS_BACKUP_TAG_KEY: location_tag} if location_tag else {} + sa.id = sa_id or f"/subscriptions/{SUB_ID}/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/{name}" + return sa + + def test_finds_matching_storage_account(self): + sa = self._make_sa("aksbkpeastus123", "eastus") + client = MagicMock() + client.storage_accounts.list.return_value = [sa] + result_sa, result_rg = _find_existing_backup_storage_account(client, "eastus") + self.assertEqual(result_sa.name, "aksbkpeastus123") + + def test_returns_none_when_no_match(self): + sa = self._make_sa("mysa", "westus") + client = MagicMock() + client.storage_accounts.list.return_value = [sa] + result_sa, result_rg = _find_existing_backup_storage_account(client, "eastus") + self.assertIsNone(result_sa) + + +# --------------------------------------------------------------------------- +# _check_existing_backup_instance +# --------------------------------------------------------------------------- +class TestCheckExistingBackupInstance(unittest.TestCase): + """Tests for _check_existing_backup_instance extension routing check.""" + + def test_no_backup_instance_returns_none(self): + """Empty list means no existing BI — should return None.""" + client = MagicMock() + response = MagicMock() + response.value = [] + response.additional_properties = {"value": []} + client.resources.get_by_id.return_value = response + result = _check_existing_backup_instance(client, CLUSTER_ID, CLUSTER_NAME) + self.assertIsNone(result) + + def test_existing_backup_instance_raises(self): + """Existing BI should raise InvalidArgumentValueError.""" + client = MagicMock() + vault_bi_id = ( + f"/subscriptions/{SUB_ID}/resourceGroups/rg" + f"/providers/Microsoft.DataProtection/backupVaults/vault1" + f"/backupInstances/bi-12345" + ) + response = MagicMock() + response.value = [{"id": vault_bi_id, "name": "bi-12345", "properties": {"currentProtectionState": 
"ProtectionConfigured"}}] + client.resources.get_by_id.return_value = response + with self.assertRaises(InvalidArgumentValueError): + _check_existing_backup_instance(client, CLUSTER_ID, CLUSTER_NAME) + + def test_404_returns_none(self): + """404 from ARM means no backup instances — should return None.""" + client = MagicMock() + client.resources.get_by_id.side_effect = Exception("Resource not found (404)") + result = _check_existing_backup_instance(client, CLUSTER_ID, CLUSTER_NAME) + self.assertIsNone(result) + + +if __name__ == "__main__": + unittest.main() From b474a9fded2ba886a0309e7ad7fc419c9b2f5596 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Wed, 18 Mar 2026 14:09:11 +0530 Subject: [PATCH 17/24] fix changelog Signed-off-by: Anshul Ahuja --- src/dataprotection/HISTORY.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dataprotection/HISTORY.rst b/src/dataprotection/HISTORY.rst index 3cd1325fac7..0ab1d81e5b3 100644 --- a/src/dataprotection/HISTORY.rst +++ b/src/dataprotection/HISTORY.rst @@ -4,7 +4,7 @@ Release History =============== 1.9.0 +++++ -* `az dataprotection enable-backup trigger`: New command to enable backup for AKS clusters with a single command. Supports preset backup strategies (Week, Month, Immutable, DisasterRecovery) and Custom strategy with user-provided configuration. +* `az dataprotection enable-backup trigger`: New command to enable backup for AKS clusters with a single command. Supports preset backup strategies (Week, Month, DisasterRecovery) and Custom strategy with user-provided configuration. * Added vendored SDKs: `azure-mgmt-containerservice` (40.2.0), `azure-mgmt-kubernetesconfiguration` (3.1.0), `azure-mgmt-resourcegraph` (8.0.0). 
1.8.0 From 487175d2fe308d4d13a67597434f91fe3528c9fa Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Wed, 18 Mar 2026 14:39:54 +0530 Subject: [PATCH 18/24] lint Signed-off-by: Anshul Ahuja --- .../azext_dataprotection/manual/aks/aks_helper.py | 4 ++-- .../latest/test_dataprotection_enable_backup.py | 13 ++++++------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index 7e9a41d7c94..d2893a27100 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -925,7 +925,7 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy print("") print(f" Subscription: {cluster_subscription_id}") print(f" Cluster: {cluster_name}") - print(f" Region: (will be determined from cluster)") + print(" Region: (will be determined from cluster)") print(f" Strategy: {backup_strategy}") print("") print("NOTE: This command requires elevated privileges (Owner or") @@ -1038,7 +1038,7 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy for remaining in range(wait_seconds, 0, -10): print(f"\t {remaining} seconds remaining...", end='\r') time.sleep(min(10, remaining)) - print(f"\t Permission propagation wait complete. ") + print("\t Permission propagation wait complete. 
") # Step 8: Create backup instance print("\n[8/8] Configuring backup instance...") diff --git a/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_enable_backup.py b/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_enable_backup.py index c18990bd342..9ee5ef09757 100644 --- a/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_enable_backup.py +++ b/src/dataprotection/azext_dataprotection/tests/latest/test_dataprotection_enable_backup.py @@ -4,11 +4,12 @@ # license information. # -------------------------------------------------------------------------- # pylint: disable=line-too-long +# pylint: disable=missing-function-docstring """Unit tests for azext_dataprotection.manual.aks.aks_helper functions.""" import unittest -from unittest.mock import MagicMock, patch, PropertyMock +from unittest.mock import MagicMock, patch from azure.cli.core.azclierror import InvalidArgumentValueError # Module under test @@ -330,14 +331,14 @@ def test_finds_matching_storage_account(self): sa = self._make_sa("aksbkpeastus123", "eastus") client = MagicMock() client.storage_accounts.list.return_value = [sa] - result_sa, result_rg = _find_existing_backup_storage_account(client, "eastus") + result_sa, _ = _find_existing_backup_storage_account(client, "eastus") self.assertEqual(result_sa.name, "aksbkpeastus123") def test_returns_none_when_no_match(self): sa = self._make_sa("mysa", "westus") client = MagicMock() client.storage_accounts.list.return_value = [sa] - result_sa, result_rg = _find_existing_backup_storage_account(client, "eastus") + result_sa, _ = _find_existing_backup_storage_account(client, "eastus") self.assertIsNone(result_sa) @@ -354,8 +355,7 @@ def test_no_backup_instance_returns_none(self): response.value = [] response.additional_properties = {"value": []} client.resources.get_by_id.return_value = response - result = _check_existing_backup_instance(client, CLUSTER_ID, CLUSTER_NAME) - self.assertIsNone(result) 
+ self.assertIsNone(_check_existing_backup_instance(client, CLUSTER_ID, CLUSTER_NAME)) def test_existing_backup_instance_raises(self): """Existing BI should raise InvalidArgumentValueError.""" @@ -375,8 +375,7 @@ def test_404_returns_none(self): """404 from ARM means no backup instances — should return None.""" client = MagicMock() client.resources.get_by_id.side_effect = Exception("Resource not found (404)") - result = _check_existing_backup_instance(client, CLUSTER_ID, CLUSTER_NAME) - self.assertIsNone(result) + self.assertIsNone(_check_existing_backup_instance(client, CLUSTER_ID, CLUSTER_NAME)) if __name__ == "__main__": From 0764b0fd62ea9451cb41aecf2a1f30ca02d78c92 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Wed, 18 Mar 2026 15:13:52 +0530 Subject: [PATCH 19/24] Fix lint errors Signed-off-by: Anshul Ahuja --- src/dataprotection/azext_dataprotection/manual/custom.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index 388bec4887f..33342741ef9 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -15,7 +15,6 @@ # pylint: disable=no-else-raise import json import time -import json from azure.cli.core.azclierror import ( RequiredArgumentMissingError, InvalidArgumentValueError, @@ -1200,7 +1199,6 @@ def dataprotection_enable_backup(cmd, datasource_type, datasource_id, backup_str from azext_dataprotection.manual.aks.aks_helper import dataprotection_enable_backup_helper dataprotection_enable_backup_helper(cmd, datasource_id, backup_strategy, config) - return def _parse_backup_configuration(backup_configuration_file): @@ -1215,8 +1213,6 @@ def _parse_backup_configuration(backup_configuration_file): Returns: dict: Parsed configuration """ - import json - if backup_configuration_file is None: return {} From cf15bb21854ea17ea87de34b0298f8328468ae3f Mon Sep 17 
00:00:00 2001 From: Anshul Ahuja Date: Thu, 19 Mar 2026 10:36:14 +0530 Subject: [PATCH 20/24] linter 10/10 Signed-off-by: Anshul Ahuja --- .../azext_dataprotection/manual/_params.py | 2 + .../manual/aks/aks_helper.py | 509 ++++++++++++------ .../azext_dataprotection/manual/custom.py | 8 +- 3 files changed, 347 insertions(+), 172 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/_params.py b/src/dataprotection/azext_dataprotection/manual/_params.py index 7ca86045e42..040ebad8ae3 100644 --- a/src/dataprotection/azext_dataprotection/manual/_params.py +++ b/src/dataprotection/azext_dataprotection/manual/_params.py @@ -196,6 +196,8 @@ def load_arguments(self, _): help="Path to backup configuration file (JSON) or inline JSON string. " "Available settings: storageAccountResourceId, blobContainerName, backupResourceGroupId, " "backupVaultId (required for Custom), backupPolicyId (required for Custom), tags.") + c.argument('yes', options_list=['--yes', '-y'], action='store_true', + help='Do not prompt for confirmation.') with self.argument_context('dataprotection job show') as c: c.argument('resource_group_name', resource_group_name_type) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index d2893a27100..392810fefd3 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -33,7 +33,7 @@ def _check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", if list_role_assignments(cmd, assignee=assignee, role=role, scope=scope, include_inherited=True): print(f"\tRole '{role}' already assigned to {identity_name}") return True - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught print(f"\tWarning: Could not list role assignments for {identity_name}: {str(e)[:100]}") # Continue to try creating the assignment @@ -43,7 
+43,7 @@ def _check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", create_role_assignment(cmd, role=role, assignee=assignee, scope=scope) print(f"\tRole '{role}' assigned to {identity_name}") return True - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught error_str = str(e).lower() # Already exists — treat as success @@ -52,7 +52,11 @@ def _check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", return True # Principal not found — retryable (identity propagation) - is_propagation_error = "principal" in error_str or "does not exist" in error_str or "cannot find" in error_str + is_propagation_error = ( + "principal" in error_str + or "does not exist" in error_str + or "cannot find" in error_str + ) if is_propagation_error and attempt < max_retries - 1: print(f"\tWaiting for identity to propagate... (attempt {attempt + 1}/{max_retries})") time.sleep(retry_delay) @@ -124,7 +128,8 @@ def _validate_request(datasource_id, backup_strategy, configuration_params): if rg_parts['subscription'].lower() != cluster_subscription_id.lower(): raise InvalidArgumentValueError( f"backupResourceGroupId must be in the same subscription as the cluster. " - f"Cluster subscription: {cluster_subscription_id}, Resource group subscription: {rg_parts['subscription']}" + f"Cluster subscription: {cluster_subscription_id}, " + f"Resource group subscription: {rg_parts['subscription']}" ) storage_account_id = configuration_params.get("storageAccountResourceId") @@ -133,7 +138,8 @@ def _validate_request(datasource_id, backup_strategy, configuration_params): if sa_parts['subscription'].lower() != cluster_subscription_id.lower(): raise InvalidArgumentValueError( f"storageAccountResourceId must be in the same subscription as the cluster. 
" - f"Cluster subscription: {cluster_subscription_id}, Storage account subscription: {sa_parts['subscription']}" + f"Cluster subscription: {cluster_subscription_id}, " + f"Storage account subscription: {sa_parts['subscription']}" ) backup_vault_id = configuration_params.get("backupVaultId") @@ -142,7 +148,8 @@ def _validate_request(datasource_id, backup_strategy, configuration_params): if vault_parts['subscription'].lower() != cluster_subscription_id.lower(): raise InvalidArgumentValueError( f"backupVaultId must be in the same subscription as the cluster. " - f"Cluster subscription: {cluster_subscription_id}, Backup vault subscription: {vault_parts['subscription']}" + f"Cluster subscription: {cluster_subscription_id}, " + f"Backup vault subscription: {vault_parts['subscription']}" ) @@ -194,7 +201,8 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name protection_error = getattr(bi_properties, 'protection_error_details', None) # Parse vault info from the BI resource ID - # Format: /subscriptions/.../resourceGroups/.../providers/Microsoft.DataProtection/backupVaults/{vault}/backupInstances/{bi} + # Format: /subscriptions/../resourceGroups/../providers/ + # Microsoft.DataProtection/backupVaults/{vault}/backupInstances/{bi} vault_name = "Unknown" vault_rg = "Unknown" if bi_id and '/backupvaults/' in str(bi_id).lower(): @@ -210,8 +218,14 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name error_info = "" if protection_error: - error_msg = protection_error.get('message', str(protection_error)) if isinstance(protection_error, dict) else str(protection_error) - print(f"\t\t- Error Details: {error_msg[:100]}..." 
if len(str(error_msg)) > 100 else f" - Error Details: {error_msg}") + if isinstance(protection_error, dict): + error_msg = protection_error.get('message', str(protection_error)) + else: + error_msg = str(protection_error) + if len(str(error_msg)) > 100: + print(f"\t\t- Error Details: {error_msg[:100]}...") + else: + print(f"\t\t- Error Details: {error_msg}") error_info = f"\n Protection Error: {error_msg}\n" raise InvalidArgumentValueError( @@ -231,10 +245,9 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name ) except InvalidArgumentValueError: - # Re-raise our own error raise - except Exception as e: - # 404 or other errors mean no backup instance exists - that's fine + except Exception as e: # pylint: disable=broad-exception-caught + # 404 or other errors mean no backup instance exists error_str = str(e).lower() if "not found" in error_str or "404" in error_str or "does not exist" in error_str: print("\tNo existing backup instance found") @@ -326,23 +339,27 @@ def _find_existing_backup_resource_group(resource_client, cluster_location): tag_value = rg.tags.get(AKS_BACKUP_TAG_KEY) if tag_value and tag_value.lower() == cluster_location.lower(): return rg - except Exception: + except Exception: # pylint: disable=broad-exception-caught # If we can't list resource groups, we'll create a new one pass return None -def _setup_resource_group(cmd, resource_client, backup_resource_group_id, cluster_location, cluster_name, cluster_identity_principal_id, resource_tags): +def _setup_resource_group(cmd, resource_client, backup_resource_group_id, + cluster_location, _cluster_name, + cluster_identity_principal_id, resource_tags): """Create or use backup resource group.""" if backup_resource_group_id: backup_resource_group_name = parse_resource_id(backup_resource_group_id)['resource_group'] print(f"\tUsing provided resource group: {backup_resource_group_name}") try: backup_resource_group = 
resource_client.resource_groups.get(backup_resource_group_name) - except Exception: + except Exception: # pylint: disable=broad-exception-caught raise InvalidArgumentValueError( f"Resource group '{backup_resource_group_name}' not found. " - f"Please ensure the resource group exists or remove 'backupResourceGroupId' from configuration to create one automatically." + "Please ensure the resource group exists or remove " + "'backupResourceGroupId' from configuration to create " + "one automatically." ) else: # Search for existing backup resource group with matching tag @@ -364,7 +381,8 @@ def _setup_resource_group(cmd, resource_client, backup_resource_group_id, cluste rg_tags.update(resource_tags) rg_params = {"location": cluster_location, "tags": rg_tags} - backup_resource_group = resource_client.resource_groups.create_or_update(backup_resource_group_name, rg_params) + backup_resource_group = resource_client.resource_groups.create_or_update( + backup_resource_group_name, rg_params) print(f"\tResource Group: {backup_resource_group.id}") _check_and_assign_role( @@ -397,18 +415,23 @@ def _find_existing_backup_storage_account(storage_client, cluster_location): # Parse resource group from the SA id sa_parts = parse_resource_id(sa.id) return sa, sa_parts['resource_group'] - except Exception: + except Exception: # pylint: disable=broad-exception-caught # If we can't list storage accounts, we'll create a new one pass return None, None -def _setup_storage_account(cmd, cluster_subscription_id, storage_account_id, blob_container_name, backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags): +def _setup_storage_account(cmd, cluster_subscription_id, storage_account_id, + blob_container_name, backup_resource_group_name, + cluster_location, cluster_name, + cluster_resource_group_name, resource_tags): """Create or use storage account.""" from azure.mgmt.storage import StorageManagementClient - storage_client = 
get_mgmt_service_client(cmd.cli_ctx, StorageManagementClient, subscription_id=cluster_subscription_id) - storage_account_rg = backup_resource_group_name # Default to backup RG + storage_client = get_mgmt_service_client( + cmd.cli_ctx, StorageManagementClient, + subscription_id=cluster_subscription_id) + storage_account_rg = backup_resource_group_name if storage_account_id: # Use provided storage account @@ -416,8 +439,13 @@ def _setup_storage_account(cmd, cluster_subscription_id, storage_account_id, blo backup_storage_account_name = sa_parts['name'] storage_account_rg = sa_parts['resource_group'] print(f"\tUsing provided storage account: {backup_storage_account_name}") - backup_storage_account = storage_client.storage_accounts.get_properties(storage_account_rg, backup_storage_account_name) - backup_storage_account_container_name = blob_container_name if blob_container_name else _generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) + backup_storage_account = storage_client.storage_accounts.get_properties( + storage_account_rg, backup_storage_account_name) + if blob_container_name: + backup_storage_account_container_name = blob_container_name + else: + backup_storage_account_container_name = _generate_backup_storage_account_container_name( + cluster_name, cluster_resource_group_name) else: # Search for existing backup storage account with matching tag print(f"\tSearching for existing AKS backup storage account in region {cluster_location}...") @@ -451,17 +479,25 @@ def _setup_storage_account(cmd, cluster_subscription_id, storage_account_id, blo account_name=backup_storage_account_name, parameters=storage_params).result() - backup_storage_account_container_name = _generate_backup_storage_account_container_name(cluster_name, cluster_resource_group_name) + backup_storage_account_container_name = _generate_backup_storage_account_container_name( + cluster_name, cluster_resource_group_name) print(f"\tStorage Account: 
{backup_storage_account.id}") print(f"\tCreating blob container: {backup_storage_account_container_name}") - storage_client.blob_containers.create(storage_account_rg, backup_storage_account_name, backup_storage_account_container_name, {}) + storage_client.blob_containers.create( + storage_account_rg, backup_storage_account_name, + backup_storage_account_container_name, {}) print("\t[OK] Storage account ready") return backup_storage_account, backup_storage_account_name, backup_storage_account_container_name -def _install_backup_extension(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_storage_account_name, backup_storage_account_container_name, backup_resource_group_name, backup_storage_account): +def _install_backup_extension(cmd, cluster_subscription_id, + cluster_resource_group_name, cluster_name, + backup_storage_account_name, + backup_storage_account_container_name, + backup_resource_group_name, + backup_storage_account): """Install backup extension on the cluster.""" backup_extension = _create_backup_extension( cmd, @@ -484,7 +520,8 @@ def _install_backup_extension(cmd, cluster_subscription_id, cluster_resource_gro return backup_extension -def _get_existing_backup_extension(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name): +def _get_existing_backup_extension(cmd, cluster_subscription_id, + cluster_resource_group_name, cluster_name): """ Check if a backup extension already exists on the cluster. @@ -492,8 +529,11 @@ def _get_existing_backup_extension(cmd, cluster_subscription_id, cluster_resourc extension object if found and healthy, None if not found. Raises on Failed or transient states. 
""" - from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration import SourceControlConfigurationClient - k8s_configuration_client = get_mgmt_service_client(cmd.cli_ctx, SourceControlConfigurationClient, subscription_id=cluster_subscription_id) + from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration import ( + SourceControlConfigurationClient) + k8s_configuration_client = get_mgmt_service_client( + cmd.cli_ctx, SourceControlConfigurationClient, + subscription_id=cluster_subscription_id) try: extensions = k8s_configuration_client.extensions.list( @@ -504,27 +544,35 @@ def _get_existing_backup_extension(cmd, cluster_subscription_id, cluster_resourc for page in extensions.by_page(): for extension in page: - if extension.extension_type and extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': + ext_type = extension.extension_type + if ext_type and ext_type.lower() == 'microsoft.dataprotection.kubernetes': provisioning_state = extension.provisioning_state if provisioning_state == "Succeeded": return extension - elif provisioning_state == "Failed": + if provisioning_state == "Failed": raise InvalidArgumentValueError( - f"Data protection extension '{extension.name}' exists on cluster '{cluster_name}' but is in Failed state.\n" + f"Data protection extension '{extension.name}' exists " + f"on cluster '{cluster_name}' but is in Failed state.\n" f"Please take corrective action before running this command again:\n" - f" 1. Check extension logs: az k8s-extension show --name {extension.name} --cluster-name {cluster_name} --resource-group {cluster_resource_group_name} --cluster-type managedClusters\n" - f" 2. Delete the failed extension: az k8s-extension delete --name {extension.name} --cluster-name {cluster_name} --resource-group {cluster_resource_group_name} --cluster-type managedClusters --yes\n" + f" 1. 
Check extension logs: az k8s-extension show " + f"--name {extension.name} --cluster-name {cluster_name} " + f"--resource-group {cluster_resource_group_name} " + f"--cluster-type managedClusters\n" + f" 2. Delete the failed extension: az k8s-extension delete " + f"--name {extension.name} --cluster-name {cluster_name} " + f"--resource-group {cluster_resource_group_name} " + f"--cluster-type managedClusters --yes\n" f" 3. Re-run this command to install a fresh extension.\n" f"For troubleshooting, visit: https://aka.ms/aksclusterbackup" ) - else: - raise InvalidArgumentValueError( - f"Data protection extension '{extension.name}' is in '{provisioning_state}' state.\n" - f"Please wait for the operation to complete and try again." - ) + raise InvalidArgumentValueError( + f"Data protection extension '{extension.name}' " + f"is in '{provisioning_state}' state.\n" + f"Please wait for the operation to complete and try again." + ) except InvalidArgumentValueError: raise - except Exception: + except Exception: # pylint: disable=broad-exception-caught pass return None @@ -552,14 +600,20 @@ def _get_storage_account_from_extension(cmd, extension, cluster_subscription_id) if not sa_name or not sa_rg: return None, None, None, None - print(f"\tExtension is configured with storage account: {sa_name} (RG: {sa_rg}, container: {container})") + print( + f"\tExtension is configured with storage account: " + f"{sa_name} (RG: {sa_rg}, container: {container})") - storage_client = get_mgmt_service_client(cmd.cli_ctx, StorageManagementClient, subscription_id=cluster_subscription_id) + storage_client = get_mgmt_service_client( + cmd.cli_ctx, StorageManagementClient, + subscription_id=cluster_subscription_id) try: sa = storage_client.storage_accounts.get_properties(sa_rg, sa_name) return sa, sa_name, container, sa_rg - except Exception as e: - print(f"\tWarning: Could not fetch storage account '{sa_name}' from extension config: {str(e)[:100]}") + except Exception as e: # pylint: 
disable=broad-exception-caught + print( + f"\tWarning: Could not fetch storage account '{sa_name}' " + f"from extension config: {str(e)[:100]}") return None, None, None, None @@ -586,13 +640,16 @@ def _find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location): tag_value = vault['tags'].get(AKS_BACKUP_TAG_KEY) if tag_value and tag_value.lower() == cluster_location.lower(): return vault - except Exception: + except Exception: # pylint: disable=broad-exception-caught # If we can't list vaults, we'll create a new one pass return None -def _try_create_vault_with_storage_type(cmd, vault_create_cls, backup_vault_name, backup_resource_group_name, cluster_location, vault_tags, storage_type, cluster_subscription_id=None): +def _try_create_vault_with_storage_type( + cmd, vault_create_cls, backup_vault_name, + backup_resource_group_name, cluster_location, vault_tags, + storage_type, cluster_subscription_id=None): """ Attempt to create a backup vault with the given storage type. @@ -622,12 +679,15 @@ def _try_create_vault_with_storage_type(cmd, vault_create_cls, backup_vault_name try: backup_vault = vault_create_cls(cli_ctx=cmd.cli_ctx)(command_args=backup_vault_args).result() return backup_vault - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught print(f"\tVault creation with {storage_type} failed: {str(e)[:120]}") return None -def _setup_backup_vault(cmd, backup_strategy, backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, cluster_resource, backup_resource_group, resource_tags): +def _setup_backup_vault( + cmd, backup_strategy, backup_vault_id, cluster_subscription_id, + cluster_location, backup_resource_group_name, cluster_resource, + backup_resource_group, resource_tags): """Create or use backup vault.""" from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Create as _BackupVaultCreate @@ -710,7 +770,10 @@ def _setup_backup_vault(cmd, backup_strategy, 
backup_vault_id, cluster_subscript return backup_vault, backup_vault_name -def _setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy_id, cluster_subscription_id): +def _setup_backup_policy(cmd, _backup_vault, backup_vault_name, + backup_resource_group_name, backup_strategy, + backup_vault_id, backup_policy_id, + cluster_subscription_id): """Create or use backup policy.""" from azext_dataprotection.manual.aaz_operations.backup_policy import Create as _BackupPolicyCreate from azext_dataprotection.aaz.latest.dataprotection.backup_policy import List as _BackupPolicyList @@ -740,7 +803,7 @@ def _setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_g if policy.get('name') == backup_policy_name: existing_policy = policy break - except Exception: + except Exception: # pylint: disable=broad-exception-caught pass if existing_policy: @@ -759,18 +822,22 @@ def _setup_backup_policy(cmd, backup_vault, backup_vault_name, backup_resource_g "subscription": cluster_subscription_id }) - print(f"\tBackup Policy: {backup_policy.get('id', backup_policy_id if backup_policy_id else 'N/A')}") + print(f"\tBackup Policy: {backup_policy.get('id', backup_policy_id or 'N/A')}") print("\t[OK] Backup policy ready") return backup_policy -def _setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_vault): +def _setup_trusted_access(cmd, cluster_subscription_id, + cluster_resource_group_name, cluster_name, + backup_vault): """Setup trusted access role binding between backup vault and cluster.""" from azext_dataprotection.vendored_sdks.azure_mgmt_containerservice import ContainerServiceClient from azext_dataprotection.vendored_sdks.azure_mgmt_containerservice.models import TrustedAccessRoleBinding - cluster_client = get_mgmt_service_client(cmd.cli_ctx, ContainerServiceClient, subscription_id=cluster_subscription_id) + cluster_client = 
get_mgmt_service_client( + cmd.cli_ctx, ContainerServiceClient, + subscription_id=cluster_subscription_id) vault_id = backup_vault["id"] vault_name = backup_vault["name"] @@ -790,7 +857,7 @@ def _setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_n print(f"\tFound existing binding: {binding.name}") print("\t[OK] Trusted access already configured") return - except Exception: + except Exception: # pylint: disable=broad-exception-caught # If we can't list, we'll try to create pass @@ -811,9 +878,14 @@ def _setup_trusted_access(cmd, cluster_subscription_id, cluster_resource_group_n print("\t[OK] Trusted access configured - vault can now access cluster for backup operations") -def _create_backup_instance(cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group, cluster_subscription_id): +def _create_backup_instance( + cmd, cluster_name, _cluster_resource_group_name, datasource_id, + cluster_location, backup_vault_name, backup_resource_group_name, + backup_strategy, backup_vault_id, backup_policy, backup_policy_id, + backup_resource_group, cluster_subscription_id): """Create backup instance.""" - from azext_dataprotection.manual.aaz_operations.backup_instance import ValidateAndCreate as _BackupInstanceValidateAndCreate + from azext_dataprotection.manual.aaz_operations.backup_instance import ( + ValidateAndCreate as _BackupInstanceValidateAndCreate) import uuid backup_instance_name = f"{cluster_name}-{str(uuid.uuid4())[:8]}" @@ -853,61 +925,54 @@ def _create_backup_instance(cmd, cluster_name, cluster_resource_group_name, data elif protection_state == "ConfiguringProtection": print("\t[OK] Backup instance created - protection configuration in progress") elif protection_state == "ProtectionError": - error_details = backup_instance.get('properties', {}).get('protectionErrorDetails', {}) - 
error_msg = error_details.get('message', 'Unknown error') if isinstance(error_details, dict) else str(error_details) - print(f"\t[WARNING] Backup instance created but protection has errors: {error_msg}") + error_details = backup_instance.get('properties', {}).get( + 'protectionErrorDetails', {}) + if isinstance(error_details, dict): + error_msg = error_details.get('message', 'Unknown error') + else: + error_msg = str(error_details) + print( + f"\t[WARNING] Backup instance created but " + f"protection has errors: {error_msg}") else: print("\t[OK] Backup instance created") return backup_instance, policy_id_for_bi -def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy='Week', configuration_params=None): - """ - Enable backup for an AKS cluster. +def _parse_and_validate_config(datasource_id, backup_strategy, configuration_params): + """Parse, validate configuration and extract settings. - Args: - cmd: CLI command context - datasource_id: Full ARM resource ID of the AKS cluster - backup_strategy: Backup strategy (Week, Month, Immutable, DisasterRecovery, Custom) - configuration_params: Dict with configuration settings + Returns: + tuple: (config_dict, cluster_subscription_id, cluster_resource_group_name, + cluster_name) """ - print("=" * 60) - print("Enabling backup for AKS cluster") - print("=" * 60) - print(f"Datasource ID: {datasource_id}") - print(f"Backup Strategy: {backup_strategy}") - - # Parse configuration_params if configuration_params is None: configuration_params = {} if isinstance(configuration_params, str): configuration_params = json.loads(configuration_params) - # Validate request (raises on failure) _validate_request(datasource_id, backup_strategy, configuration_params) - # Extract configuration values (camelCase keys) - resource_tags = configuration_params.get("tags") - storage_account_id = configuration_params.get("storageAccountResourceId") - blob_container_name = configuration_params.get("blobContainerName") - 
backup_resource_group_id = configuration_params.get("backupResourceGroupId") - backup_vault_id = configuration_params.get("backupVaultId") - backup_policy_id = configuration_params.get("backupPolicyId") - - # Parse cluster details from resource ID cluster_id_parts = parse_resource_id(datasource_id) - cluster_subscription_id = cluster_id_parts['subscription'] - cluster_resource_group_name = cluster_id_parts['resource_group'] - cluster_name = cluster_id_parts['name'] + return ( + configuration_params, + cluster_id_parts['subscription'], + cluster_id_parts['resource_group'], + cluster_id_parts['name'], + ) - if resource_tags: - print(f"Resource Tags: {json.dumps(resource_tags)}") - # Show execution plan and get user confirmation +def _show_plan_and_confirm(cluster_subscription_id, cluster_name, + backup_strategy): + """Display the execution plan and prompt for user confirmation. + + Returns: + True if user confirmed, False otherwise + """ print("\nThis command will perform the following steps:") print(" [1] Validate the AKS cluster") - print(" [2] Create or reuse a backup resource group (AKSAzureBackup_)") + print(" [2] Create or reuse a backup resource group") print(" [3] Create or reuse a storage account for backup data") print(" [4] Install the data protection extension on the cluster") print(" [5] Create or reuse a backup vault") @@ -916,12 +981,12 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy print(" [8] Create a backup instance to start protection") print("") print("The following RBAC role assignments will be created:") - print(" - Cluster MSI → Contributor on Backup Resource Group") - print(" - Extension MSI → Storage Blob Data Contributor on Storage Account") - print(" - Vault MSI → Reader on AKS Cluster") - print(" - Vault MSI → Reader on Backup Resource Group") - print(" - Vault MSI → Disk Snapshot Contributor on Backup Resource Group") - print(" - Vault MSI → Storage Blob Data Reader on Storage Account") + print(" - 
Cluster MSI -> Contributor on Backup Resource Group") + print(" - Extension MSI -> Storage Blob Data Contributor on SA") + print(" - Vault MSI -> Reader on AKS Cluster") + print(" - Vault MSI -> Reader on Backup Resource Group") + print(" - Vault MSI -> Disk Snapshot Contributor on Backup RG") + print(" - Vault MSI -> Storage Blob Data Reader on SA") print("") print(f" Subscription: {cluster_subscription_id}") print(f" Cluster: {cluster_name}") @@ -934,55 +999,45 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy print("") from knack.prompting import prompt_y_n - if not prompt_y_n("Do you want to proceed?", default='y'): - print("Operation cancelled by user.") - return + return prompt_y_n("Do you want to proceed?", default='y') - from azure.mgmt.resource import ResourceManagementClient - resource_client = get_mgmt_service_client(cmd.cli_ctx, ResourceManagementClient, subscription_id=cluster_subscription_id) - # Pre-check: Verify no existing backup instance for this cluster - print("\n[Pre-check] Checking for existing backup...") - _check_existing_backup_instance(resource_client, datasource_id, cluster_name) - - # Step 1: Validate cluster - print("\n[1/8] Validating cluster...") - cluster_resource, cluster_location, cluster_identity_principal_id = _validate_cluster(resource_client, datasource_id, cluster_name) +def _setup_extension_and_storage( + cmd, cluster_subscription_id, cluster_resource_group_name, + cluster_name, storage_account_id, blob_container_name, + backup_resource_group_name, cluster_location, resource_tags): + """Setup backup extension and storage account (steps 3 & 4). 
- # Step 2: Setup resource group - print("\n[2/8] Setting up backup resource group...") - backup_resource_group, backup_resource_group_name = _setup_resource_group( - cmd, resource_client, backup_resource_group_id, cluster_location, cluster_name, - cluster_identity_principal_id, resource_tags) + If the extension is already installed, reuses its configured storage + account. Otherwise creates storage first, then installs the extension. - # Step 3 & 4: Check extension first, then handle storage account accordingly - # If the extension is already installed, use its configured storage account - # instead of creating/finding a new one (which may be different). + Returns: + storage account object + """ print("\n[3/8] Checking for existing backup extension...") existing_extension = _get_existing_backup_extension( - cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name) + cmd, cluster_subscription_id, + cluster_resource_group_name, cluster_name) if existing_extension: - print(f"\tBackup extension already installed: {existing_extension.name}") - - # Extract the storage account the extension is actually configured with - ext_sa, ext_sa_name, ext_container, ext_sa_rg = _get_storage_account_from_extension( - cmd, existing_extension, cluster_subscription_id) + print(f"\tBackup extension already installed: " + f"{existing_extension.name}") + ext_sa, ext_sa_name, _, _ = \ + _get_storage_account_from_extension( + cmd, existing_extension, cluster_subscription_id) if ext_sa: - # Use the extension's configured storage account for all subsequent operations backup_storage_account = ext_sa - backup_storage_account_name = ext_sa_name - backup_storage_account_container_name = ext_container print(f"\tUsing extension's storage account: {ext_sa_name}") else: - # Fallback: extension exists but we can't read its config — setup storage account normally - print("\tWarning: Could not read extension storage config, setting up storage account...") - backup_storage_account, 
backup_storage_account_name, backup_storage_account_container_name = _setup_storage_account( - cmd, cluster_subscription_id, storage_account_id, blob_container_name, - backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags) + print("\tWarning: Could not read extension storage " + "config, setting up storage account...") + backup_storage_account = _setup_storage_account( + cmd, cluster_subscription_id, storage_account_id, + blob_container_name, backup_resource_group_name, + cluster_location, cluster_name, + cluster_resource_group_name, resource_tags)[0] - # Ensure extension identity has correct role on its storage account _check_and_assign_role( cmd, role="Storage Blob Data Contributor", @@ -994,23 +1049,96 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy print("\n[4/8] Backup extension already installed...") print("\t[OK] Backup extension ready") else: - # No extension — setup storage account first, then install extension - print("\tNo existing extension found, setting up storage account...") - backup_storage_account, backup_storage_account_name, backup_storage_account_container_name = _setup_storage_account( - cmd, cluster_subscription_id, storage_account_id, blob_container_name, - backup_resource_group_name, cluster_location, cluster_name, cluster_resource_group_name, resource_tags) + print("\tNo existing extension found, setting up storage...") + sa_result = _setup_storage_account( + cmd, cluster_subscription_id, storage_account_id, + blob_container_name, backup_resource_group_name, + cluster_location, cluster_name, + cluster_resource_group_name, resource_tags) + backup_storage_account = sa_result[0] print("\n[4/8] Installing backup extension...") _install_backup_extension( - cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, - backup_storage_account_name, backup_storage_account_container_name, + cmd, cluster_subscription_id, + cluster_resource_group_name, 
cluster_name, + sa_result[1], sa_result[2], backup_resource_group_name, backup_storage_account) + return backup_storage_account + + +def dataprotection_enable_backup_helper( + cmd, datasource_id: str, backup_strategy='Week', + configuration_params=None, yes=False): + """ + Enable backup for an AKS cluster. + + Args: + cmd: CLI command context + datasource_id: Full ARM resource ID of the AKS cluster + backup_strategy: Backup strategy + configuration_params: Dict with configuration settings + """ + print("=" * 60) + print("Enabling backup for AKS cluster") + print("=" * 60) + print(f"Datasource ID: {datasource_id}") + print(f"Backup Strategy: {backup_strategy}") + + # Parse and validate configuration + configuration_params, cluster_subscription_id, \ + cluster_resource_group_name, cluster_name = \ + _parse_and_validate_config( + datasource_id, backup_strategy, configuration_params) + + # Extract configuration values (camelCase keys) + config = configuration_params + resource_tags = config.get("tags") + + if resource_tags: + print(f"Resource Tags: {json.dumps(resource_tags)}") + + # Show execution plan and get user confirmation + if not yes and not _show_plan_and_confirm( + cluster_subscription_id, cluster_name, backup_strategy): + print("Operation cancelled by user.") + return + + from azure.mgmt.resource import ResourceManagementClient + resource_client = get_mgmt_service_client( + cmd.cli_ctx, ResourceManagementClient, + subscription_id=cluster_subscription_id) + + # Pre-check: Verify no existing backup instance for this cluster + print("\n[Pre-check] Checking for existing backup...") + _check_existing_backup_instance(resource_client, datasource_id, cluster_name) + + # Step 1: Validate cluster + print("\n[1/8] Validating cluster...") + cluster_resource, cluster_location, cluster_identity_principal_id = \ + _validate_cluster(resource_client, datasource_id, cluster_name) + + # Step 2: Setup resource group + print("\n[2/8] Setting up backup resource group...") + 
backup_resource_group, backup_resource_group_name = _setup_resource_group( + cmd, resource_client, config.get("backupResourceGroupId"), + cluster_location, cluster_name, + cluster_identity_principal_id, resource_tags) + + # Step 3 & 4: Setup storage and extension + backup_storage_account = _setup_extension_and_storage( + cmd, cluster_subscription_id, cluster_resource_group_name, + cluster_name, config.get("storageAccountResourceId"), + config.get("blobContainerName"), + backup_resource_group_name, cluster_location, resource_tags) + # Step 5: Setup backup vault print("\n[5/8] Setting up backup vault...") backup_vault, backup_vault_name = _setup_backup_vault( - cmd, backup_strategy, backup_vault_id, cluster_subscription_id, cluster_location, backup_resource_group_name, - cluster_resource, backup_resource_group, resource_tags) + cmd, backup_strategy, config.get("backupVaultId"), + cluster_subscription_id, cluster_location, + backup_resource_group_name, cluster_resource, + backup_resource_group, resource_tags) # Grant vault identity read access to the backup storage account _check_and_assign_role( @@ -1023,13 +1151,16 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy # Step 6: Setup backup policy print("\n[6/8] Setting up backup policy...") backup_policy = _setup_backup_policy( - cmd, backup_vault, backup_vault_name, backup_resource_group_name, - backup_strategy, backup_vault_id, backup_policy_id, cluster_subscription_id) + cmd, backup_vault, backup_vault_name, + backup_resource_group_name, backup_strategy, + config.get("backupVaultId"), config.get("backupPolicyId"), + cluster_subscription_id) # Step 7: Setup trusted access print("\n[7/8] Setting up trusted access...") _setup_trusted_access( - cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_vault) + cmd, cluster_subscription_id, cluster_resource_group_name, + cluster_name, backup_vault) # Wait for role assignment propagation before creating backup 
instance import time @@ -1043,8 +1174,12 @@ def dataprotection_enable_backup_helper(cmd, datasource_id: str, backup_strategy # Step 8: Create backup instance print("\n[8/8] Configuring backup instance...") backup_instance, policy_id_for_bi = _create_backup_instance( - cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, - backup_vault_name, backup_resource_group_name, backup_strategy, backup_vault_id, backup_policy, backup_policy_id, backup_resource_group, cluster_subscription_id) + cmd, cluster_name, cluster_resource_group_name, + datasource_id, cluster_location, backup_vault_name, + backup_resource_group_name, backup_strategy, + config.get("backupVaultId"), + backup_policy, config.get("backupPolicyId"), + backup_resource_group, cluster_subscription_id) # Print summary print("\n" + "=" * 60) @@ -1174,7 +1309,9 @@ def _get_policy_config_for_strategy(backup_strategy): } -def _get_backup_instance_payload(backup_instance_name, cluster_name, datasource_id, cluster_location, policy_id, backup_resource_group_id): +def _get_backup_instance_payload( + backup_instance_name, cluster_name, datasource_id, + cluster_location, policy_id, backup_resource_group_id): """Get backup instance payload for AKS cluster.""" return { "backup_instance_name": backup_instance_name, @@ -1222,8 +1359,14 @@ def _get_backup_instance_payload(backup_instance_name, cluster_name, datasource_ } -def _generate_arm_id(subscription_id, resource_group_name, resource_type, resource_name): - return f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/{resource_type}/{resource_name}" +def _generate_arm_id(subscription_id, resource_group_name, + resource_type, resource_name): + """Generate a full ARM resource ID.""" + return ( + f"/subscriptions/{subscription_id}" + f"/resourceGroups/{resource_group_name}" + f"/providers/{resource_type}/{resource_name}" + ) def _generate_backup_resource_group_name(cluster_location): @@ -1341,9 +1484,16 @@ def 
_generate_trusted_access_role_binding_name(): return f"tarb-{guid_suffix}" -def _create_backup_extension(cmd, subscription_id, resource_group_name, cluster_name, storage_account_name, storage_account_container_name, storage_account_resource_group, storage_account_subscription_id): +def _create_backup_extension( + cmd, subscription_id, resource_group_name, cluster_name, + storage_account_name, storage_account_container_name, + storage_account_resource_group, + storage_account_subscription_id): + """Create or reuse the data protection k8s extension.""" from azext_dataprotection.vendored_sdks.azure_mgmt_kubernetesconfiguration import SourceControlConfigurationClient - k8s_configuration_client = get_mgmt_service_client(cmd.cli_ctx, SourceControlConfigurationClient, subscription_id=subscription_id) + k8s_configuration_client = get_mgmt_service_client( + cmd.cli_ctx, SourceControlConfigurationClient, + subscription_id=subscription_id) extensions = k8s_configuration_client.extensions.list( cluster_rp="Microsoft.ContainerService", @@ -1354,38 +1504,55 @@ def _create_backup_extension(cmd, subscription_id, resource_group_name, cluster_ for page in extensions.by_page(): for extension in page: if extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': - # Check extension provisioning state provisioning_state = extension.provisioning_state if provisioning_state == "Succeeded": - print(f"\tData protection extension ({extension.name}) is already installed and healthy.") + print( + f"\tData protection extension ({extension.name}) " + "is already installed and healthy.") return extension - elif provisioning_state == "Failed": + if provisioning_state == "Failed": raise InvalidArgumentValueError( - f"Data protection extension '{extension.name}' exists on cluster '{cluster_name}' but is in Failed state.\n" - f"Please take corrective action before running this command again:\n" - f" 1. 
Check extension logs: az k8s-extension show --name {extension.name} --cluster-name {cluster_name} --resource-group {resource_group_name} --cluster-type managedClusters\n" - f" 2. Delete the failed extension: az k8s-extension delete --name {extension.name} --cluster-name {cluster_name} --resource-group {resource_group_name} --cluster-type managedClusters --yes\n" - f" 3. Re-run this command to install a fresh extension.\n" - f"For troubleshooting, visit: https://aka.ms/aksclusterbackup" - ) - else: - # Extension is in a transient state (Creating, Updating, Deleting, etc.) - raise InvalidArgumentValueError( - f"Data protection extension '{extension.name}' is in '{provisioning_state}' state.\n" - f"Please wait for the operation to complete and try again." + f"Data protection extension '{extension.name}' " + f"exists on cluster '{cluster_name}' " + f"but is in Failed state.\n" + f"Please take corrective action:\n" + f" 1. Check extension logs: az k8s-extension " + f"show --name {extension.name} " + f"--cluster-name {cluster_name} " + f"--resource-group {resource_group_name} " + f"--cluster-type managedClusters\n" + f" 2. Delete: az k8s-extension delete " + f"--name {extension.name} " + f"--cluster-name {cluster_name} " + f"--resource-group {resource_group_name} " + f"--cluster-type managedClusters --yes\n" + f" 3. Re-run this command.\n" + f"For troubleshooting: " + f"https://aka.ms/aksclusterbackup" ) + raise InvalidArgumentValueError( + f"Data protection extension '{extension.name}' " + f"is in '{provisioning_state}' state.\n" + f"Please wait for the operation to complete " + f"and try again." 
+ ) print("\tInstalling data protection extension (azure-aks-backup)...") from azure.cli.core.extension.operations import add_extension_to_path from importlib import import_module add_extension_to_path("k8s-extension") - K8s_extension_client_factory = import_module("azext_k8s_extension._client_factory") + k8s_ext_client_factory = import_module( + "azext_k8s_extension._client_factory") k8s_extension_module = import_module("azext_k8s_extension.custom") + # The k8s-extension client factory uses the CLI context subscription, + # not a parameter. Set it to the cluster's subscription. + cmd.cli_ctx.data['subscription_id'] = subscription_id extension = k8s_extension_module.create_k8s_extension( cmd=cmd, - client=K8s_extension_client_factory.cf_k8s_extension_operation(cmd.cli_ctx), + client=k8s_ext_client_factory.cf_k8s_extension_operation( + cmd.cli_ctx), resource_group_name=resource_group_name, cluster_name=cluster_name, name="azure-aks-backup", @@ -1398,8 +1565,10 @@ def _create_backup_extension(cmd, subscription_id, resource_group_name, cluster_ configuration_settings=[{ "blobContainer": storage_account_container_name, "storageAccount": storage_account_name, - "storageAccountResourceGroup": storage_account_resource_group, - "storageAccountSubscriptionId": storage_account_subscription_id + "storageAccountResourceGroup": + storage_account_resource_group, + "storageAccountSubscriptionId": + storage_account_subscription_id }] ).result() diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index 33342741ef9..e7d2e1d019c 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -1151,7 +1151,10 @@ def restore_initialize_for_item_recovery(cmd, datasource_type, source_datastore, return restore_request -def dataprotection_enable_backup(cmd, datasource_type, datasource_id, backup_strategy=None, backup_configuration_file=None): 
+def dataprotection_enable_backup(cmd, datasource_type, datasource_id, + backup_strategy=None, + backup_configuration_file=None, + yes=False): """Enable backup for a datasource using a single command. This command orchestrates all the steps required to enable backup: @@ -1198,7 +1201,8 @@ def dataprotection_enable_backup(cmd, datasource_type, datasource_id, backup_str ) from azext_dataprotection.manual.aks.aks_helper import dataprotection_enable_backup_helper - dataprotection_enable_backup_helper(cmd, datasource_id, backup_strategy, config) + dataprotection_enable_backup_helper( + cmd, datasource_id, backup_strategy, config, yes=yes) def _parse_backup_configuration(backup_configuration_file): From 7b1be51d33ecaf0e36fe3821e954fce8d2902709 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Thu, 19 Mar 2026 12:21:58 +0530 Subject: [PATCH 21/24] lint Signed-off-by: Anshul Ahuja --- .../manual/aks/aks_helper.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index 392810fefd3..dc2533063a5 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -1092,8 +1092,7 @@ def dataprotection_enable_backup_helper( datasource_id, backup_strategy, configuration_params) # Extract configuration values (camelCase keys) - config = configuration_params - resource_tags = config.get("tags") + resource_tags = configuration_params.get("tags") if resource_tags: print(f"Resource Tags: {json.dumps(resource_tags)}") @@ -1121,21 +1120,21 @@ def dataprotection_enable_backup_helper( # Step 2: Setup resource group print("\n[2/8] Setting up backup resource group...") backup_resource_group, backup_resource_group_name = _setup_resource_group( - cmd, resource_client, config.get("backupResourceGroupId"), + cmd, resource_client, 
configuration_params.get("backupResourceGroupId"), cluster_location, cluster_name, cluster_identity_principal_id, resource_tags) # Step 3 & 4: Setup storage and extension backup_storage_account = _setup_extension_and_storage( cmd, cluster_subscription_id, cluster_resource_group_name, - cluster_name, config.get("storageAccountResourceId"), - config.get("blobContainerName"), + cluster_name, configuration_params.get("storageAccountResourceId"), + configuration_params.get("blobContainerName"), backup_resource_group_name, cluster_location, resource_tags) # Step 5: Setup backup vault print("\n[5/8] Setting up backup vault...") backup_vault, backup_vault_name = _setup_backup_vault( - cmd, backup_strategy, config.get("backupVaultId"), + cmd, backup_strategy, configuration_params.get("backupVaultId"), cluster_subscription_id, cluster_location, backup_resource_group_name, cluster_resource, backup_resource_group, resource_tags) @@ -1153,7 +1152,8 @@ def dataprotection_enable_backup_helper( backup_policy = _setup_backup_policy( cmd, backup_vault, backup_vault_name, backup_resource_group_name, backup_strategy, - config.get("backupVaultId"), config.get("backupPolicyId"), + configuration_params.get("backupVaultId"), + configuration_params.get("backupPolicyId"), cluster_subscription_id) # Step 7: Setup trusted access @@ -1177,8 +1177,8 @@ def dataprotection_enable_backup_helper( cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, backup_resource_group_name, backup_strategy, - config.get("backupVaultId"), - backup_policy, config.get("backupPolicyId"), + configuration_params.get("backupVaultId"), + backup_policy, configuration_params.get("backupPolicyId"), backup_resource_group, cluster_subscription_id) # Print summary From f617628f56576585314edff30ea91105e560480a Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 23 Mar 2026 10:02:13 +0530 Subject: [PATCH 22/24] PR Feedback Signed-off-by: Anshul Ahuja --- 
.../manual/aks/aks_helper.py | 300 +++++++++--------- .../azext_dataprotection/manual/custom.py | 34 +- 2 files changed, 157 insertions(+), 177 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index dc2533063a5..c1cb4cc0d2a 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -2,6 +2,9 @@ from azure.cli.core.azclierror import InvalidArgumentValueError from azure.cli.core.commands.client_factory import get_mgmt_service_client from azure.mgmt.core.tools import parse_resource_id +from knack.log import get_logger + +logger = get_logger(__name__) # Tag used to identify storage accounts created for AKS backup @@ -31,24 +34,24 @@ def _check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", # Check if role assignment already exists try: if list_role_assignments(cmd, assignee=assignee, role=role, scope=scope, include_inherited=True): - print(f"\tRole '{role}' already assigned to {identity_name}") + logger.warning("\tRole '%s' already assigned to %s", role, identity_name) return True except Exception as e: # pylint: disable=broad-exception-caught - print(f"\tWarning: Could not list role assignments for {identity_name}: {str(e)[:100]}") + logger.warning("\tWarning: Could not list role assignments for %s: %s", identity_name, str(e)[:100]) # Continue to try creating the assignment # Try to create with retries for identity propagation delay for attempt in range(max_retries): try: create_role_assignment(cmd, role=role, assignee=assignee, scope=scope) - print(f"\tRole '{role}' assigned to {identity_name}") + logger.warning("\tRole '%s' assigned to %s", role, identity_name) return True except Exception as e: # pylint: disable=broad-exception-caught error_str = str(e).lower() # Already exists — treat as success if "conflict" in error_str or "already exists" 
in error_str: - print(f"\tRole '{role}' already assigned to {identity_name}") + logger.warning("\tRole '%s' already assigned to %s", role, identity_name) return True # Principal not found — retryable (identity propagation) @@ -58,7 +61,7 @@ def _check_and_assign_role(cmd, role, assignee, scope, identity_name="identity", or "cannot find" in error_str ) if is_propagation_error and attempt < max_retries - 1: - print(f"\tWaiting for identity to propagate... (attempt {attempt + 1}/{max_retries})") + logger.warning("\tWaiting for identity to propagate... (attempt %d/%d)", attempt + 1, max_retries) time.sleep(retry_delay) continue @@ -162,7 +165,7 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name Returns: None if no backup instance exists, raises error with details if one exists """ - print("\tChecking for existing backup configuration...") + logger.warning("Checking for existing backup configuration...") try: # Use extension routing to query backup instances on the cluster @@ -183,7 +186,7 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name # If list is empty, no backup instance exists if not bi_list: - print("\tNo existing backup instance found") + logger.warning("No existing backup instance found") return None # Get details of the first backup instance @@ -210,11 +213,11 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name vault_name = bi_parts.get('name', 'Unknown') vault_rg = bi_parts.get('resource_group', 'Unknown') - print("\tFound existing backup instance!") - print(f"\t\t- Backup Instance: {bi_name}") - print(f"\t\t- Backup Vault: {vault_name}") - print(f"\t\t- Resource Group: {vault_rg}") - print(f"\t\t- Protection State: {protection_status}") + logger.warning("Found existing backup instance!") + logger.warning(" - Backup Instance: %s", bi_name) + logger.warning(" - Backup Vault: %s", vault_name) + logger.warning(" - Resource Group: %s", vault_rg) + logger.warning(" 
- Protection State: %s", protection_status) error_info = "" if protection_error: @@ -223,9 +226,9 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name else: error_msg = str(protection_error) if len(str(error_msg)) > 100: - print(f"\t\t- Error Details: {error_msg[:100]}...") + logger.warning(" - Error Details: %s...", error_msg[:100]) else: - print(f"\t\t- Error Details: {error_msg}") + logger.warning(" - Error Details: %s", error_msg) error_info = f"\n Protection Error: {error_msg}\n" raise InvalidArgumentValueError( @@ -250,13 +253,13 @@ def _check_existing_backup_instance(resource_client, datasource_id, cluster_name # 404 or other errors mean no backup instance exists error_str = str(e).lower() if "not found" in error_str or "404" in error_str or "does not exist" in error_str: - print("\tNo existing backup instance found") + logger.warning("No existing backup instance found") return None # For other errors, log and continue (don't block on extension routing failures) - print(f"\tCould not check for existing backup (will proceed): {str(e)[:100]}") + logger.warning("Could not check for existing backup (will proceed): %s", str(e)[:100]) return None - print("\tNo existing backup instance found") + logger.warning("No existing backup instance found") return None @@ -284,7 +287,7 @@ def _get_cluster_msi_principal_id(cluster_resource, cluster_name): # System-assigned identity if identity.principal_id: - print(f"\tIdentity type: {identity_type} (system-assigned)") + logger.warning("Identity type: %s (system-assigned)", identity_type) return identity.principal_id # User-assigned identity — get the first UAMI's principal ID @@ -301,7 +304,7 @@ def _get_cluster_msi_principal_id(cluster_resource, cluster_name): if principal_id: uami_name = uami_id.split('/')[-1] if '/' in uami_id else uami_id - print(f"\tIdentity type: {identity_type} (user-assigned: {uami_name})") + logger.warning("Identity type: %s (user-assigned: %s)", identity_type, 
uami_name) return principal_id raise InvalidArgumentValueError( @@ -315,10 +318,10 @@ def _validate_cluster(resource_client, datasource_id, cluster_name): """Validate the AKS cluster exists and get its details.""" cluster_resource = resource_client.resources.get_by_id(datasource_id, api_version="2024-08-01") cluster_location = cluster_resource.location - print(f"\tCluster: {cluster_name}") - print(f"\tLocation: {cluster_location}") + logger.warning("Cluster: %s", cluster_name) + logger.warning("Location: %s", cluster_location) cluster_identity_principal_id = _get_cluster_msi_principal_id(cluster_resource, cluster_name) - print("\t[OK] Cluster validated") + logger.warning("[OK] Cluster validated") return cluster_resource, cluster_location, cluster_identity_principal_id @@ -351,7 +354,7 @@ def _setup_resource_group(cmd, resource_client, backup_resource_group_id, """Create or use backup resource group.""" if backup_resource_group_id: backup_resource_group_name = parse_resource_id(backup_resource_group_id)['resource_group'] - print(f"\tUsing provided resource group: {backup_resource_group_name}") + logger.warning("Using provided resource group: %s", backup_resource_group_name) try: backup_resource_group = resource_client.resource_groups.get(backup_resource_group_name) except Exception: # pylint: disable=broad-exception-caught @@ -363,17 +366,17 @@ def _setup_resource_group(cmd, resource_client, backup_resource_group_id, ) else: # Search for existing backup resource group with matching tag - print(f"\tSearching for existing AKS backup resource group in region {cluster_location}...") + logger.warning("Searching for existing AKS backup resource group in region %s...", cluster_location) backup_resource_group = _find_existing_backup_resource_group(resource_client, cluster_location) if backup_resource_group: # Found existing resource group - reuse it backup_resource_group_name = backup_resource_group.name - print(f"\tFound existing backup resource group: 
{backup_resource_group_name}") + logger.warning("Found existing backup resource group: %s", backup_resource_group_name) else: # Create new resource group with AKS backup tag backup_resource_group_name = _generate_backup_resource_group_name(cluster_location) - print(f"\tCreating resource group: {backup_resource_group_name}") + logger.warning("Creating resource group: %s", backup_resource_group_name) # Build tags - include AKS backup tag plus any user-provided tags rg_tags = {AKS_BACKUP_TAG_KEY: cluster_location} @@ -384,14 +387,14 @@ def _setup_resource_group(cmd, resource_client, backup_resource_group_id, backup_resource_group = resource_client.resource_groups.create_or_update( backup_resource_group_name, rg_params) - print(f"\tResource Group: {backup_resource_group.id}") + logger.warning("Resource Group: %s", backup_resource_group.id) _check_and_assign_role( cmd, role="Contributor", assignee=cluster_identity_principal_id, scope=backup_resource_group.id, identity_name="cluster identity") - print("\t[OK] Resource group ready") + logger.warning("[OK] Resource group ready") return backup_resource_group, backup_resource_group_name @@ -438,7 +441,7 @@ def _setup_storage_account(cmd, cluster_subscription_id, storage_account_id, sa_parts = parse_resource_id(storage_account_id) backup_storage_account_name = sa_parts['name'] storage_account_rg = sa_parts['resource_group'] - print(f"\tUsing provided storage account: {backup_storage_account_name}") + logger.warning("Using provided storage account: %s", backup_storage_account_name) backup_storage_account = storage_client.storage_accounts.get_properties( storage_account_rg, backup_storage_account_name) if blob_container_name: @@ -448,18 +451,18 @@ def _setup_storage_account(cmd, cluster_subscription_id, storage_account_id, cluster_name, cluster_resource_group_name) else: # Search for existing backup storage account with matching tag - print(f"\tSearching for existing AKS backup storage account in region {cluster_location}...") 
+ logger.warning("Searching for existing AKS backup storage account in region %s...", cluster_location) backup_storage_account, existing_rg = _find_existing_backup_storage_account(storage_client, cluster_location) if backup_storage_account: # Found existing storage account - reuse it backup_storage_account_name = backup_storage_account.name storage_account_rg = existing_rg - print(f"\tFound existing backup storage account: {backup_storage_account_name}") + logger.warning("Found existing backup storage account: %s", backup_storage_account_name) else: # Create new storage account with AKS backup tag backup_storage_account_name = _generate_backup_storage_account_name(cluster_location) - print(f"\tCreating storage account: {backup_storage_account_name}") + logger.warning("Creating storage account: %s", backup_storage_account_name) # Build tags - include AKS backup tag plus any user-provided tags sa_tags = {AKS_BACKUP_TAG_KEY: cluster_location} @@ -482,12 +485,12 @@ def _setup_storage_account(cmd, cluster_subscription_id, storage_account_id, backup_storage_account_container_name = _generate_backup_storage_account_container_name( cluster_name, cluster_resource_group_name) - print(f"\tStorage Account: {backup_storage_account.id}") - print(f"\tCreating blob container: {backup_storage_account_container_name}") + logger.warning("Storage Account: %s", backup_storage_account.id) + logger.warning("Creating blob container: %s", backup_storage_account_container_name) storage_client.blob_containers.create( storage_account_rg, backup_storage_account_name, backup_storage_account_container_name, {}) - print("\t[OK] Storage account ready") + logger.warning("[OK] Storage account ready") return backup_storage_account, backup_storage_account_name, backup_storage_account_container_name @@ -515,7 +518,7 @@ def _install_backup_extension(cmd, cluster_subscription_id, assignee=backup_extension.aks_assigned_identity.principal_id, scope=backup_storage_account.id, identity_name="backup extension 
identity") - print("\t[OK] Backup extension ready") + logger.warning("[OK] Backup extension ready") return backup_extension @@ -600,9 +603,9 @@ def _get_storage_account_from_extension(cmd, extension, cluster_subscription_id) if not sa_name or not sa_rg: return None, None, None, None - print( - f"\tExtension is configured with storage account: " - f"{sa_name} (RG: {sa_rg}, container: {container})") + logger.warning( + "Extension is configured with storage account: " + "%s (RG: %s, container: %s)", sa_name, sa_rg, container) storage_client = get_mgmt_service_client( cmd.cli_ctx, StorageManagementClient, @@ -611,9 +614,9 @@ def _get_storage_account_from_extension(cmd, extension, cluster_subscription_id) sa = storage_client.storage_accounts.get_properties(sa_rg, sa_name) return sa, sa_name, container, sa_rg except Exception as e: # pylint: disable=broad-exception-caught - print( - f"\tWarning: Could not fetch storage account '{sa_name}' " - f"from extension config: {str(e)[:100]}") + logger.warning( + "Warning: Could not fetch storage account '%s' " + "from extension config: %s", sa_name, str(e)[:100]) return None, None, None, None @@ -680,7 +683,7 @@ def _try_create_vault_with_storage_type( backup_vault = vault_create_cls(cli_ctx=cmd.cli_ctx)(command_args=backup_vault_args).result() return backup_vault except Exception as e: # pylint: disable=broad-exception-caught - print(f"\tVault creation with {storage_type} failed: {str(e)[:120]}") + logger.warning("Vault creation with %s failed: %s", storage_type, str(e)[:120]) return None @@ -696,7 +699,7 @@ def _setup_backup_vault( vault_parts = parse_resource_id(backup_vault_id) backup_vault_name = vault_parts['name'] vault_rg = vault_parts['resource_group'] - print(f"\tUsing provided backup vault: {backup_vault_name}") + logger.warning("Using provided backup vault: %s", backup_vault_name) from azext_dataprotection.aaz.latest.dataprotection.backup_vault import Show as _BackupVaultShow backup_vault = 
_BackupVaultShow(cli_ctx=cmd.cli_ctx)(command_args={ "vault_name": backup_vault_name, @@ -705,17 +708,17 @@ def _setup_backup_vault( }) else: # Search for existing backup vault with matching tag - print(f"\tSearching for existing AKS backup vault in region {cluster_location}...") + logger.warning("Searching for existing AKS backup vault in region %s...", cluster_location) backup_vault = _find_existing_backup_vault(cmd, cluster_subscription_id, cluster_location) if backup_vault: # Found existing vault - reuse it backup_vault_name = backup_vault['name'] - print(f"\tFound existing backup vault: {backup_vault_name}") + logger.warning("Found existing backup vault: %s", backup_vault_name) else: # Create new backup vault with AKS backup tag backup_vault_name = _generate_backup_vault_name(cluster_location) - print(f"\tCreating backup vault: {backup_vault_name}") + logger.warning("Creating backup vault: %s", backup_vault_name) # Build tags - include AKS backup tag plus any user-provided tags vault_tags = {AKS_BACKUP_TAG_KEY: cluster_location} @@ -728,13 +731,13 @@ def _setup_backup_vault( storage_type = None for try_type in ['GeoRedundant', 'ZoneRedundant', 'LocallyRedundant']: - print(f"\tTrying storage type: {try_type}...") + logger.warning("Trying storage type: %s...", try_type) backup_vault = _try_create_vault_with_storage_type( cmd, _BackupVaultCreate, backup_vault_name, backup_resource_group_name, cluster_location, vault_tags, try_type, cluster_subscription_id) if backup_vault: storage_type = try_type - print(f"\tVault created with storage type: {storage_type}") + logger.warning("Vault created with storage type: %s", storage_type) break if not backup_vault: @@ -744,7 +747,7 @@ def _setup_backup_vault( f"Please check region availability and try again." 
) - print(f"\tBackup Vault: {backup_vault['id']}") + logger.warning("Backup Vault: %s", backup_vault['id']) _check_and_assign_role( cmd, role="Reader", @@ -765,7 +768,7 @@ def _setup_backup_vault( assignee=backup_vault["identity"]["principalId"], scope=backup_resource_group.id, identity_name="backup vault identity (snapshot contributor on resource group)") - print("\t[OK] Backup vault ready") + logger.warning("[OK] Backup vault ready") return backup_vault, backup_vault_name @@ -782,7 +785,7 @@ def _setup_backup_policy(cmd, _backup_vault, backup_vault_name, if backup_strategy == 'Custom' and backup_policy_id: # Use provided policy for Custom strategy backup_policy_name = parse_resource_id(backup_policy_id)['name'] - print(f"\tUsing provided backup policy: {backup_policy_name}") + logger.warning("Using provided backup policy: %s", backup_policy_name) backup_policy = {"id": backup_policy_id} else: # Get vault RG - for custom with provided vault, use vault's RG @@ -807,12 +810,12 @@ def _setup_backup_policy(cmd, _backup_vault, backup_vault_name, pass if existing_policy: - print(f"\tFound existing backup policy: {backup_policy_name}") + logger.warning("Found existing backup policy: %s", backup_policy_name) backup_policy = existing_policy else: # Create policy based on strategy policy_config = _get_policy_config_for_strategy(backup_strategy) - print(f"\tCreating backup policy: {backup_policy_name}") + logger.warning("Creating backup policy: %s", backup_policy_name) backup_policy = _BackupPolicyCreate(cli_ctx=cmd.cli_ctx)(command_args={ "backup_policy_name": backup_policy_name, @@ -822,8 +825,10 @@ def _setup_backup_policy(cmd, _backup_vault, backup_vault_name, "subscription": cluster_subscription_id }) - print(f"\tBackup Policy: {backup_policy.get('id', backup_policy_id or 'N/A')}") - print("\t[OK] Backup policy ready") + logger.warning( + "Backup Policy: %s", + backup_policy.get('id', backup_policy_id or 'N/A')) + logger.warning("[OK] Backup policy ready") return 
backup_policy @@ -841,12 +846,12 @@ def _setup_trusted_access(cmd, cluster_subscription_id, vault_id = backup_vault["id"] vault_name = backup_vault["name"] - print("\tConfiguring trusted access between:") - print(f"\t\t- Backup Vault: {vault_name}") - print(f"\t\t- AKS Cluster: {cluster_name}") + logger.warning("Configuring trusted access between:") + logger.warning(" - Backup Vault: %s", vault_name) + logger.warning(" - AKS Cluster: %s", cluster_name) # Check if trusted access binding already exists for this vault-cluster pair - print("\tChecking for existing trusted access binding...") + logger.warning("Checking for existing trusted access binding...") try: existing_bindings = cluster_client.trusted_access_role_bindings.list( resource_group_name=cluster_resource_group_name, @@ -854,8 +859,8 @@ def _setup_trusted_access(cmd, cluster_subscription_id, ) for binding in existing_bindings: if binding.source_resource_id.lower() == vault_id.lower(): - print(f"\tFound existing binding: {binding.name}") - print("\t[OK] Trusted access already configured") + logger.warning("Found existing binding: %s", binding.name) + logger.warning("[OK] Trusted access already configured") return except Exception: # pylint: disable=broad-exception-caught # If we can't list, we'll try to create @@ -863,8 +868,8 @@ def _setup_trusted_access(cmd, cluster_subscription_id, # Create new trusted access role binding with GUID-based name binding_name = _generate_trusted_access_role_binding_name() - print(f"\tCreating trusted access role binding: {binding_name}") - print("\t\tRole: Microsoft.DataProtection/backupVaults/backup-operator") + logger.warning("Creating trusted access role binding: %s", binding_name) + logger.warning(" Role: Microsoft.DataProtection/backupVaults/backup-operator") _trusted_access_role_binding = TrustedAccessRoleBinding( source_resource_id=vault_id, @@ -875,7 +880,7 @@ def _setup_trusted_access(cmd, cluster_subscription_id, resource_name=cluster_name, 
trusted_access_role_binding_name=binding_name, trusted_access_role_binding=_trusted_access_role_binding).result() - print("\t[OK] Trusted access configured - vault can now access cluster for backup operations") + logger.warning("[OK] Trusted access configured - vault can now access cluster for backup operations") def _create_backup_instance( @@ -898,7 +903,7 @@ def _create_backup_instance( # Get policy ID policy_id_for_bi = backup_policy.get("id") if isinstance(backup_policy, dict) else backup_policy_id - print(f"\tCreating backup instance: {backup_instance_name}") + logger.warning("Creating backup instance: %s", backup_instance_name) backup_instance_payload = _get_backup_instance_payload( backup_instance_name=backup_instance_name, cluster_name=cluster_name, @@ -918,12 +923,12 @@ def _create_backup_instance( # Check and report the protection state protection_state = backup_instance.get('properties', {}).get('currentProtectionState', 'Unknown') - print(f"\tProtection State: {protection_state}") + logger.warning("Protection State: %s", protection_state) if protection_state == "ProtectionConfigured": - print("\t[OK] Backup instance created and protection configured") + logger.warning("[OK] Backup instance created and protection configured") elif protection_state == "ConfiguringProtection": - print("\t[OK] Backup instance created - protection configuration in progress") + logger.warning("[OK] Backup instance created - protection configuration in progress") elif protection_state == "ProtectionError": error_details = backup_instance.get('properties', {}).get( 'protectionErrorDetails', {}) @@ -931,11 +936,11 @@ def _create_backup_instance( error_msg = error_details.get('message', 'Unknown error') else: error_msg = str(error_details) - print( - f"\t[WARNING] Backup instance created but " - f"protection has errors: {error_msg}") + logger.warning( + "[WARNING] Backup instance created but " + "protection has errors: %s", error_msg) else: - print("\t[OK] Backup instance 
created") + logger.warning("[OK] Backup instance created") return backup_instance, policy_id_for_bi @@ -970,33 +975,33 @@ def _show_plan_and_confirm(cluster_subscription_id, cluster_name, Returns: True if user confirmed, False otherwise """ - print("\nThis command will perform the following steps:") - print(" [1] Validate the AKS cluster") - print(" [2] Create or reuse a backup resource group") - print(" [3] Create or reuse a storage account for backup data") - print(" [4] Install the data protection extension on the cluster") - print(" [5] Create or reuse a backup vault") - print(" [6] Create or reuse a backup policy") - print(" [7] Configure trusted access between vault and cluster") - print(" [8] Create a backup instance to start protection") - print("") - print("The following RBAC role assignments will be created:") - print(" - Cluster MSI -> Contributor on Backup Resource Group") - print(" - Extension MSI -> Storage Blob Data Contributor on SA") - print(" - Vault MSI -> Reader on AKS Cluster") - print(" - Vault MSI -> Reader on Backup Resource Group") - print(" - Vault MSI -> Disk Snapshot Contributor on Backup RG") - print(" - Vault MSI -> Storage Blob Data Reader on SA") - print("") - print(f" Subscription: {cluster_subscription_id}") - print(f" Cluster: {cluster_name}") - print(" Region: (will be determined from cluster)") - print(f" Strategy: {backup_strategy}") - print("") - print("NOTE: This command requires elevated privileges (Owner or") - print(" User Access Administrator) on the subscription to create") - print(" RBAC role assignments listed above.") - print("") + logger.warning("This command will perform the following steps:") + logger.warning(" [1] Validate the AKS cluster") + logger.warning(" [2] Create or reuse a backup resource group") + logger.warning(" [3] Create or reuse a storage account for backup data") + logger.warning(" [4] Install the data protection extension on the cluster") + logger.warning(" [5] Create or reuse a backup vault") + 
logger.warning(" [6] Create or reuse a backup policy") + logger.warning(" [7] Configure trusted access between vault and cluster") + logger.warning(" [8] Create a backup instance to start protection") + logger.warning("") + logger.warning("The following RBAC role assignments will be created:") + logger.warning(" - Cluster MSI -> Contributor on Backup Resource Group") + logger.warning(" - Extension MSI -> Storage Blob Data Contributor on SA") + logger.warning(" - Vault MSI -> Reader on AKS Cluster") + logger.warning(" - Vault MSI -> Reader on Backup Resource Group") + logger.warning(" - Vault MSI -> Disk Snapshot Contributor on Backup RG") + logger.warning(" - Vault MSI -> Storage Blob Data Reader on SA") + logger.warning("") + logger.warning(" Subscription: %s", cluster_subscription_id) + logger.warning(" Cluster: %s", cluster_name) + logger.warning(" Region: (will be determined from cluster)") + logger.warning(" Strategy: %s", backup_strategy) + logger.warning("") + logger.warning("NOTE: This command requires elevated privileges (Owner or") + logger.warning(" User Access Administrator) on the subscription to create") + logger.warning(" RBAC role assignments listed above.") + logger.warning("") from knack.prompting import prompt_y_n return prompt_y_n("Do you want to proceed?", default='y') @@ -1014,24 +1019,25 @@ def _setup_extension_and_storage( Returns: storage account object """ - print("\n[3/8] Checking for existing backup extension...") + logger.warning("[3/8] Checking for existing backup extension...") existing_extension = _get_existing_backup_extension( cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name) if existing_extension: - print(f"\tBackup extension already installed: " - f"{existing_extension.name}") + logger.warning("Backup extension already installed: %s", + existing_extension.name) ext_sa, ext_sa_name, _, _ = \ _get_storage_account_from_extension( cmd, existing_extension, cluster_subscription_id) if ext_sa: 
backup_storage_account = ext_sa - print(f"\tUsing extension's storage account: {ext_sa_name}") + logger.warning("Using extension's storage account: %s", + ext_sa_name) else: - print("\tWarning: Could not read extension storage " - "config, setting up storage account...") + logger.warning("Warning: Could not read extension storage " + "config, setting up storage account...") backup_storage_account = _setup_storage_account( cmd, cluster_subscription_id, storage_account_id, blob_container_name, backup_resource_group_name, @@ -1044,12 +1050,12 @@ def _setup_extension_and_storage( assignee=existing_extension.aks_assigned_identity.principal_id, scope=backup_storage_account.id, identity_name="backup extension identity") - print("\t[OK] Storage account ready") + logger.warning("[OK] Storage account ready") - print("\n[4/8] Backup extension already installed...") - print("\t[OK] Backup extension ready") + logger.warning("[4/8] Backup extension already installed...") + logger.warning("[OK] Backup extension ready") else: - print("\tNo existing extension found, setting up storage...") + logger.warning("No existing extension found, setting up storage...") sa_result = _setup_storage_account( cmd, cluster_subscription_id, storage_account_id, blob_container_name, backup_resource_group_name, @@ -1057,7 +1063,7 @@ def _setup_extension_and_storage( cluster_resource_group_name, resource_tags) backup_storage_account = sa_result[0] - print("\n[4/8] Installing backup extension...") + logger.warning("[4/8] Installing backup extension...") _install_backup_extension( cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, @@ -1079,11 +1085,11 @@ def dataprotection_enable_backup_helper( backup_strategy: Backup strategy configuration_params: Dict with configuration settings """ - print("=" * 60) - print("Enabling backup for AKS cluster") - print("=" * 60) - print(f"Datasource ID: {datasource_id}") - print(f"Backup Strategy: {backup_strategy}") + logger.warning("=" * 60) + 
logger.warning("Enabling backup for AKS cluster") + logger.warning("=" * 60) + logger.warning("Datasource ID: %s", datasource_id) + logger.warning("Backup Strategy: %s", backup_strategy) # Parse and validate configuration configuration_params, cluster_subscription_id, \ @@ -1095,12 +1101,12 @@ def dataprotection_enable_backup_helper( resource_tags = configuration_params.get("tags") if resource_tags: - print(f"Resource Tags: {json.dumps(resource_tags)}") + logger.warning("Resource Tags: %s", json.dumps(resource_tags)) # Show execution plan and get user confirmation if not yes and not _show_plan_and_confirm( cluster_subscription_id, cluster_name, backup_strategy): - print("Operation cancelled by user.") + logger.warning("Operation cancelled by user.") return from azure.mgmt.resource import ResourceManagementClient @@ -1109,16 +1115,16 @@ def dataprotection_enable_backup_helper( subscription_id=cluster_subscription_id) # Pre-check: Verify no existing backup instance for this cluster - print("\n[Pre-check] Checking for existing backup...") + logger.warning("[Pre-check] Checking for existing backup...") _check_existing_backup_instance(resource_client, datasource_id, cluster_name) # Step 1: Validate cluster - print("\n[1/8] Validating cluster...") + logger.warning("[1/8] Validating cluster...") cluster_resource, cluster_location, cluster_identity_principal_id = \ _validate_cluster(resource_client, datasource_id, cluster_name) # Step 2: Setup resource group - print("\n[2/8] Setting up backup resource group...") + logger.warning("[2/8] Setting up backup resource group...") backup_resource_group, backup_resource_group_name = _setup_resource_group( cmd, resource_client, configuration_params.get("backupResourceGroupId"), cluster_location, cluster_name, @@ -1132,7 +1138,7 @@ def dataprotection_enable_backup_helper( backup_resource_group_name, cluster_location, resource_tags) # Step 5: Setup backup vault - print("\n[5/8] Setting up backup vault...") + logger.warning("[5/8] 
Setting up backup vault...") backup_vault, backup_vault_name = _setup_backup_vault( cmd, backup_strategy, configuration_params.get("backupVaultId"), cluster_subscription_id, cluster_location, @@ -1148,7 +1154,7 @@ def dataprotection_enable_backup_helper( identity_name="backup vault identity (on storage account)") # Step 6: Setup backup policy - print("\n[6/8] Setting up backup policy...") + logger.warning("[6/8] Setting up backup policy...") backup_policy = _setup_backup_policy( cmd, backup_vault, backup_vault_name, backup_resource_group_name, backup_strategy, @@ -1157,7 +1163,7 @@ def dataprotection_enable_backup_helper( cluster_subscription_id) # Step 7: Setup trusted access - print("\n[7/8] Setting up trusted access...") + logger.warning("[7/8] Setting up trusted access...") _setup_trusted_access( cmd, cluster_subscription_id, cluster_resource_group_name, cluster_name, backup_vault) @@ -1165,14 +1171,16 @@ def dataprotection_enable_backup_helper( # Wait for role assignment propagation before creating backup instance import time wait_seconds = 120 - print(f"\n\tWaiting {wait_seconds} seconds for permission propagation across Azure AD...") + logger.warning( + "Waiting %d seconds for permission propagation " + "across Azure AD...", wait_seconds) for remaining in range(wait_seconds, 0, -10): - print(f"\t {remaining} seconds remaining...", end='\r') + logger.warning(" %d seconds remaining...", remaining) time.sleep(min(10, remaining)) - print("\t Permission propagation wait complete. 
") + logger.warning("Permission propagation wait complete.") # Step 8: Create backup instance - print("\n[8/8] Configuring backup instance...") + logger.warning("[8/8] Configuring backup instance...") backup_instance, policy_id_for_bi = _create_backup_instance( cmd, cluster_name, cluster_resource_group_name, datasource_id, cluster_location, backup_vault_name, @@ -1182,16 +1190,17 @@ def dataprotection_enable_backup_helper( backup_resource_group, cluster_subscription_id) # Print summary - print("\n" + "=" * 60) - print("Backup enabled successfully!") - print("=" * 60) - print("\nBackup Configuration:") - print(f" * Resource Group: {backup_resource_group.id}") - print(f" * Storage Account: {backup_storage_account.id}") - print(f" * Backup Vault: {backup_vault['id']}") - print(f" * Backup Policy: {policy_id_for_bi}") - print(f" * Backup Instance: {backup_instance.get('id', 'N/A')}") - print("=" * 60) + logger.warning("=" * 60) + logger.warning("Backup enabled successfully!") + logger.warning("=" * 60) + logger.warning("Backup Configuration:") + logger.warning(" * Resource Group: %s", backup_resource_group.id) + logger.warning(" * Storage Account: %s", backup_storage_account.id) + logger.warning(" * Backup Vault: %s", backup_vault['id']) + logger.warning(" * Backup Policy: %s", policy_id_for_bi) + logger.warning(" * Backup Instance: %s", + backup_instance.get('id', 'N/A')) + logger.warning("=" * 60) def _get_policy_config_for_strategy(backup_strategy): @@ -1506,9 +1515,10 @@ def _create_backup_extension( if extension.extension_type.lower() == 'microsoft.dataprotection.kubernetes': provisioning_state = extension.provisioning_state if provisioning_state == "Succeeded": - print( - f"\tData protection extension ({extension.name}) " - "is already installed and healthy.") + logger.warning( + "Data protection extension (%s) " + "is already installed and healthy.", + extension.name) return extension if provisioning_state == "Failed": raise InvalidArgumentValueError( @@ -1537,7 
+1547,7 @@ def _create_backup_extension( f"and try again." ) - print("\tInstalling data protection extension (azure-aks-backup)...") + logger.warning("Installing data protection extension (azure-aks-backup)...") from azure.cli.core.extension.operations import add_extension_to_path from importlib import import_module @@ -1574,8 +1584,10 @@ def _create_backup_extension( # Verify extension is in healthy state after installation if extension.provisioning_state == "Succeeded": - print("\tExtension installed and healthy (Provisioning State: Succeeded)") + logger.warning("Extension installed and healthy " + "(Provisioning State: Succeeded)") else: - print(f"\tWarning: Extension provisioning state is '{extension.provisioning_state}'") + logger.warning("Warning: Extension provisioning state is '%s'", + extension.provisioning_state) return extension diff --git a/src/dataprotection/azext_dataprotection/manual/custom.py b/src/dataprotection/azext_dataprotection/manual/custom.py index e7d2e1d019c..d7afe014b1f 100644 --- a/src/dataprotection/azext_dataprotection/manual/custom.py +++ b/src/dataprotection/azext_dataprotection/manual/custom.py @@ -1190,8 +1190,7 @@ def dataprotection_enable_backup(cmd, datasource_type, datasource_id, f"Allowed values: {', '.join(valid_strategies)}" ) - # Parse configuration from file or dict - config = _parse_backup_configuration(backup_configuration_file) + config = backup_configuration_file if backup_configuration_file is not None else {} # Route to datasource-specific handler if datasource_type == "AzureKubernetesService": @@ -1203,34 +1202,3 @@ def dataprotection_enable_backup(cmd, datasource_type, datasource_id, from azext_dataprotection.manual.aks.aks_helper import dataprotection_enable_backup_helper dataprotection_enable_backup_helper( cmd, datasource_id, backup_strategy, config, yes=yes) - - -def _parse_backup_configuration(backup_configuration_file): - """Parse backup configuration from file or dict into a dictionary. 
- - Args: - backup_configuration_file: Can be: - - None: Returns empty dict - - dict: Returns as-is (already parsed by validate_file_or_dict) - - str: JSON string to parse - - Returns: - dict: Parsed configuration - """ - if backup_configuration_file is None: - return {} - - # If it's already a dict, return as-is (validate_file_or_dict already parsed the file) - if isinstance(backup_configuration_file, dict): - return backup_configuration_file - - # If it's a string, try to parse as JSON - if isinstance(backup_configuration_file, str): - try: - return json.loads(backup_configuration_file) - except json.JSONDecodeError: - raise InvalidArgumentValueError( - f"Invalid JSON in backup-configuration-file: '{backup_configuration_file}'" - ) - - return {} From 2c7bdd4bb44fbe5e6c96d0998118d6e8188c3a47 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 23 Mar 2026 12:35:19 +0530 Subject: [PATCH 23/24] Add license Signed-off-by: Anshul Ahuja --- .../azext_dataprotection/manual/aks/aks_helper.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index c1cb4cc0d2a..cd0e00bbdb7 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -1,3 +1,9 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------------------------- + import json from azure.cli.core.azclierror import InvalidArgumentValueError from azure.cli.core.commands.client_factory import get_mgmt_service_client From 0916b849ffe417b7c3f812f8adf5a7e3892bda79 Mon Sep 17 00:00:00 2001 From: Anshul Ahuja Date: Mon, 23 Mar 2026 14:38:34 +0530 Subject: [PATCH 24/24] Fix docs Signed-off-by: Anshul Ahuja --- .../azext_dataprotection/manual/_params.py | 17 +++++++++++------ .../manual/aks/aks_helper.py | 2 +- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/dataprotection/azext_dataprotection/manual/_params.py b/src/dataprotection/azext_dataprotection/manual/_params.py index 040ebad8ae3..2056e387f48 100644 --- a/src/dataprotection/azext_dataprotection/manual/_params.py +++ b/src/dataprotection/azext_dataprotection/manual/_params.py @@ -188,14 +188,19 @@ def load_arguments(self, _): c.argument('datasource_type', type=str, help="The type of datasource to be backed up. Supported values: AzureKubernetesService.") c.argument('datasource_id', type=str, help="The full ARM resource ID of the datasource to be backed up.") c.argument('backup_strategy', arg_type=get_enum_type(get_all_backup_strategies()), - help="Backup strategy preset. For AzureKubernetesService: Week (7-day retention), Month (30-day retention), " - "Immutable (7-day Op + 90-day Vault Tier), DisasterRecovery (GRS+CRR), Custom (bring your own vault/policy). " - "Default: Week.") + help="Backup strategy preset (daily incremental backups). " + "For AzureKubernetesService: " + "Week (7-day operational store retention), " + "Month (30-day operational store retention), " + "DisasterRecovery (7-day operational + 90-day vault store retention), " + "Custom (bring your own vault/policy). 
Default: Week.") c.argument('backup_configuration_file', type=validate_file_or_dict, options_list=['--backup-configuration-file', '-f'], - help="Path to backup configuration file (JSON) or inline JSON string. " - "Available settings: storageAccountResourceId, blobContainerName, backupResourceGroupId, " - "backupVaultId (required for Custom), backupPolicyId (required for Custom), tags.") + help="Path to a JSON backup configuration file. " + "Supports backupVaultId and backupPolicyId " + "(required for Custom strategy). " + "For workload-specific settings, " + "refer to the documentation.") c.argument('yes', options_list=['--yes', '-y'], action='store_true', help='Do not prompt for confirmation.') diff --git a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py index cd0e00bbdb7..0392e4e6377 100644 --- a/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py +++ b/src/dataprotection/azext_dataprotection/manual/aks/aks_helper.py @@ -95,7 +95,7 @@ def _validate_request(datasource_id, backup_strategy, configuration_params): Args: datasource_id: Full ARM resource ID of the AKS cluster - backup_strategy: Backup strategy (Week, Month, Immutable, DisasterRecovery, Custom) + backup_strategy: Backup strategy (Week, Month, DisasterRecovery, Custom) configuration_params: Dict with configuration settings (camelCase keys) - storageAccountResourceId: Storage account resource ID - blobContainerName: Blob container name