From b46a316609c9c2fc623fb851aad87e6cb4705d8b Mon Sep 17 00:00:00 2001 From: Pola Shekar Date: Sun, 22 Mar 2026 14:16:48 +0530 Subject: [PATCH] [Confluent] Confluent CLI update for 2025-08-18-preview api version changes --- linter_exclusions.yml | 69 + src/confluent/HISTORY.rst | 10 + .../latest/confluent/agreement/__cmd_group.py | 23 + .../latest/confluent/agreement/__init__.py | 12 + .../aaz/latest/confluent/agreement/_list.py | 205 +++ .../agreement/default/__cmd_group.py | 23 + .../confluent/agreement/default/__init__.py | 12 + .../confluent/agreement/default/_create.py | 263 ++++ .../organization/environment/__init__.py | 3 + .../organization/environment/_create.py | 315 ++++ .../organization/environment/_delete.py | 171 +++ .../organization/environment/_update.py | 472 ++++++ .../environment/cluster/__init__.py | 3 + .../environment/cluster/_create.py | 541 +++++++ .../environment/cluster/_delete.py | 181 +++ .../environment/cluster/_update.py | 728 +++++++++ .../cluster/connector/__cmd_group.py | 23 + .../environment/cluster/connector/__init__.py | 16 + .../environment/cluster/connector/_create.py | 1083 ++++++++++++++ .../environment/cluster/connector/_delete.py | 190 +++ .../environment/cluster/connector/_list.py | 521 +++++++ .../environment/cluster/connector/_show.py | 509 +++++++ .../environment/cluster/connector/_update.py | 1325 +++++++++++++++++ .../environment/cluster/topic/__cmd_group.py | 23 + .../environment/cluster/topic/__init__.py | 16 + .../environment/cluster/topic/_create.py | 390 +++++ .../environment/cluster/topic/_delete.py | 190 +++ .../environment/cluster/topic/_list.py | 267 ++++ .../environment/cluster/topic/_show.py | 255 ++++ .../environment/cluster/topic/_update.py | 562 +++++++ .../confluent/validation/__cmd_group.py | 23 + .../latest/confluent/validation/__init__.py | 13 + .../confluent/validation/_orgvalidate.py | 481 ++++++ .../confluent/validation/_orgvalidate_v2.py | 366 +++++ .../azext_confluent/azext_metadata.json | 2 +- 
.../azext_confluent/generated/_help.py | 118 ++ src/confluent/azext_confluent/manual/_help.py | 118 ++ src/confluent/setup.py | 2 +- 38 files changed, 9522 insertions(+), 2 deletions(-) create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/agreement/__cmd_group.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/agreement/__init__.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/agreement/_list.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/__cmd_group.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/__init__.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/_create.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_create.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_delete.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_update.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_create.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_delete.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_update.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/__cmd_group.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/__init__.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_create.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_delete.py create mode 100644 
src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_list.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_show.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_update.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/__cmd_group.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/__init__.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_create.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_delete.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_list.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_show.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_update.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/validation/__cmd_group.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/validation/__init__.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/validation/_orgvalidate.py create mode 100644 src/confluent/azext_confluent/aaz/latest/confluent/validation/_orgvalidate_v2.py diff --git a/linter_exclusions.yml b/linter_exclusions.yml index e5775a7fa6a..ec70aefed13 100644 --- a/linter_exclusions.yml +++ b/linter_exclusions.yml @@ -401,6 +401,75 @@ codespace plan create: confidentialledger managedccfs update: rule_exclusions: - missing_command_test_coverage +confluent organization environment: + rule_exclusions: + - require_wait_command_if_no_wait +confluent organization environment cluster: + 
rule_exclusions: + - require_wait_command_if_no_wait +confluent organization environment cluster connector: + rule_exclusions: + - require_wait_command_if_no_wait +confluent organization environment cluster topic: + rule_exclusions: + - require_wait_command_if_no_wait +confluent agreement default create: + rule_exclusions: + - missing_command_example +confluent organization environment create: + rule_exclusions: + - missing_command_example + parameters: + stream_governance_config: + rule_exclusions: + - option_length_too_long +confluent organization environment update: + rule_exclusions: + - missing_command_example + parameters: + stream_governance_config: + rule_exclusions: + - option_length_too_long +confluent organization environment cluster create: + rule_exclusions: + - missing_command_example +confluent organization environment cluster update: + rule_exclusions: + - missing_command_example +confluent organization environment cluster connector create: + rule_exclusions: + - missing_command_example + parameters: + connector_service_type_info: + rule_exclusions: + - option_length_too_long +confluent organization environment cluster connector update: + rule_exclusions: + - missing_command_example + parameters: + connector_service_type_info: + rule_exclusions: + - option_length_too_long +confluent organization environment cluster topic create: + rule_exclusions: + - missing_command_example + parameters: + partitions_reassignments: + rule_exclusions: + - option_length_too_long +confluent organization environment cluster topic update: + rule_exclusions: + - missing_command_example + parameters: + partitions_reassignments: + rule_exclusions: + - option_length_too_long +confluent validation orgvalidate: + rule_exclusions: + - missing_command_example +confluent validation orgvalidate-v2: + rule_exclusions: + - missing_command_example containerapp env create: parameters: infrastructure_subnet_resource_id: diff --git a/src/confluent/HISTORY.rst 
b/src/confluent/HISTORY.rst index 19ae0de0496..25b6308233d 100644 --- a/src/confluent/HISTORY.rst +++ b/src/confluent/HISTORY.rst @@ -2,6 +2,16 @@ Release History =============== +1.2.0 ++++++ +* Updated to API version 2025-08-18-preview. +* Added `az confluent agreement list` and `az confluent agreement default create` commands for managing Confluent marketplace agreements. +* Added `az confluent organization environment create/delete/update` commands for managing environments within a Confluent organization. +* Added `az confluent organization environment cluster create/delete/update` commands for managing clusters within an environment. +* Added `az confluent organization environment cluster connector create/delete/list/show/update` commands for managing connectors within a cluster. +* Added `az confluent organization environment cluster topic create/delete/list/show/update` commands for managing topics within a cluster. +* Added `az confluent validation orgvalidate` and `az confluent validation orgvalidate-v2` commands for validating Confluent organization configurations. + 1.1.0 +++++ * Updated az confluent organization create command to accept term_id as an optional parameter. diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/agreement/__cmd_group.py b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/__cmd_group.py new file mode 100644 index 00000000000..c66527096c8 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "confluent agreement", +) +class __CMDGroup(AAZCommandGroup): + """Manage Agreement + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/agreement/__init__.py b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/__init__.py new file mode 100644 index 00000000000..d63ae5a6fc9 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/__init__.py @@ -0,0 +1,12 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * +from ._list import * diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/agreement/_list.py b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/_list.py new file mode 100644 index 00000000000..8536c5c4504 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/_list.py @@ -0,0 +1,205 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent agreement list", +) +class List(AAZCommand): + """List Confluent marketplace agreements in the subscription. + """ + + _aaz_info = { + "version": "2024-02-13", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/providers/microsoft.confluent/agreements", "2024-02-13"], + ] + } + + AZ_SUPPORT_PAGINATION = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_paging(self._execute_operations, self._output) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.MarketplaceAgreementsList(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True) + next_link = self.deserialize_output(self.ctx.vars.instance.next_link) + return result, next_link + + class MarketplaceAgreementsList(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/providers/Microsoft.Confluent/agreements", + **self.url_parameters + ) + + @property + def 
method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-02-13", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.next_link = AAZStrType( + serialized_name="nextLink", + ) + _schema_on_200.value = AAZListType() + + value = cls._schema_on_200.value + value.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _element.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + _element.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200.value.Element.properties + properties.accepted = AAZBoolType() + properties.license_text_link = AAZStrType( + serialized_name="licenseTextLink", + ) + properties.plan = AAZStrType() + properties.privacy_policy_link = AAZStrType( + serialized_name="privacyPolicyLink", + ) + properties.product = AAZStrType() + properties.publisher = AAZStrType() + 
properties.retrieve_datetime = AAZStrType( + serialized_name="retrieveDatetime", + ) + properties.signature = AAZStrType() + + system_data = cls._schema_on_200.value.Element.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + return cls._schema_on_200 + + +class _ListHelper: + """Helper class for List""" + + +__all__ = ["List"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/__cmd_group.py b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/__cmd_group.py new file mode 100644 index 00000000000..a2bb8e6ad78 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "confluent agreement default", +) +class __CMDGroup(AAZCommandGroup): + """Manage Default + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/__init__.py b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/__init__.py new file mode 100644 index 00000000000..a6df9f5a835 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/__init__.py @@ -0,0 +1,12 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * +from ._create import * diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/_create.py b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/_create.py new file mode 100644 index 00000000000..06446f58bbc --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/agreement/default/_create.py @@ -0,0 +1,263 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent agreement default create", +) +class Create(AAZCommand): + """Create Confluent Marketplace agreement in the subscription. + """ + + _aaz_info = { + "version": "2024-02-13", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/providers/microsoft.confluent/agreements/default", "2024-02-13"], + ] + } + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.accepted = AAZBoolArg( + options=["--accepted"], + arg_group="Properties", + help="If any version of the terms have been accepted, otherwise false.", + ) + _args_schema.license_text_link = AAZStrArg( + options=["--license-text-link"], + arg_group="Properties", + help="Link to HTML with Microsoft and Publisher terms.", + ) + _args_schema.plan = AAZStrArg( + options=["--plan"], + arg_group="Properties", + help="Plan identifier string.", + ) + _args_schema.privacy_policy_link = AAZStrArg( + options=["--privacy-policy-link"], + arg_group="Properties", + help="Link to the privacy policy of the publisher.", + ) + _args_schema.product = AAZStrArg( + options=["--product"], + arg_group="Properties", + help="Product identifier string.", + ) + _args_schema.publisher = AAZStrArg( + options=["--publisher"], + arg_group="Properties", + help="Publisher identifier string.", + ) + _args_schema.retrieve_datetime = AAZDateTimeArg( + options=["--retrieve-datetime"], + arg_group="Properties", + 
help="Date and time in UTC of when the terms were accepted. This is empty if Accepted is false.", + ) + _args_schema.signature = AAZStrArg( + options=["--signature"], + arg_group="Properties", + help="Terms signature.", + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.MarketplaceAgreementsCreate(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class MarketplaceAgreementsCreate(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/providers/Microsoft.Confluent/agreements/default", + **self.url_parameters + ) + + @property + def method(self): + return "PUT" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-02-13", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + 
typ=AAZObjectType, + typ_kwargs={"flags": {"client_flatten": True}} + ) + _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}}) + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("accepted", AAZBoolType, ".accepted") + properties.set_prop("licenseTextLink", AAZStrType, ".license_text_link") + properties.set_prop("plan", AAZStrType, ".plan") + properties.set_prop("privacyPolicyLink", AAZStrType, ".privacy_policy_link") + properties.set_prop("product", AAZStrType, ".product") + properties.set_prop("publisher", AAZStrType, ".publisher") + properties.set_prop("retrieveDatetime", AAZStrType, ".retrieve_datetime") + properties.set_prop("signature", AAZStrType, ".signature") + + return self.serialize_content(_content_value) + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.id = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200.name = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _schema_on_200.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + _schema_on_200.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200.properties + properties.accepted = AAZBoolType() + properties.license_text_link = AAZStrType( + serialized_name="licenseTextLink", + ) + properties.plan = AAZStrType() + properties.privacy_policy_link = AAZStrType( + serialized_name="privacyPolicyLink", + ) + properties.product = AAZStrType() + properties.publisher = AAZStrType() + 
properties.retrieve_datetime = AAZStrType( + serialized_name="retrieveDatetime", + ) + properties.signature = AAZStrType() + + system_data = cls._schema_on_200.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + return cls._schema_on_200 + + +class _CreateHelper: + """Helper class for Create""" + + +__all__ = ["Create"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/__init__.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/__init__.py index 2df85698253..c401f439385 100644 --- a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/__init__.py +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/__init__.py @@ -9,5 +9,8 @@ # flake8: noqa from .__cmd_group import * +from ._create import * +from ._delete import * from ._list import * from ._show import * +from ._update import * diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_create.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_create.py new file mode 100644 index 00000000000..11dc41e090f --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_create.py @@ -0,0 +1,315 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment create", + is_preview=True, +) +class Create(AAZCommand): + """Create confluent environment + """ + + _aaz_info = { + "version": "2025-08-18-preview", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}", "2025-08-18-preview"], + ] + } + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.environment_id = AAZStrArg( + options=["-n", "--name", "--environment-id"], + help="Confluent environment id", + required=True, + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + + # define Arg Group "Body" + + _args_schema = cls._args_schema + _args_schema.kind = AAZStrArg( + options=["--kind"], + arg_group="Body", + help="Type of environment", + ) + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.metadata = AAZObjectArg( + options=["--metadata"], + arg_group="Properties", + help="Metadata of the record", + ) + _args_schema.stream_governance_config = AAZObjectArg( + options=["--stream-governance-config"], + arg_group="Properties", + help="Stream governance configuration", + ) + + metadata = cls._args_schema.metadata + metadata.created_timestamp = AAZStrArg( + 
options=["created-timestamp"], + help="Created Date Time", + ) + metadata.deleted_timestamp = AAZStrArg( + options=["deleted-timestamp"], + help="Deleted Date time", + ) + metadata.resource_name = AAZStrArg( + options=["resource-name"], + help="Resource name of the record", + ) + metadata.self = AAZStrArg( + options=["self"], + help="Self lookup url", + ) + metadata.updated_timestamp = AAZStrArg( + options=["updated-timestamp"], + help="Updated Date time", + ) + + stream_governance_config = cls._args_schema.stream_governance_config + stream_governance_config.package = AAZStrArg( + options=["package"], + help="Stream governance configuration", + enum={"ADVANCED": "ADVANCED", "ESSENTIALS": "ESSENTIALS"}, + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.EnvironmentCreateOrUpdate(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class EnvironmentCreateOrUpdate(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200, 201]: + return self.on_200_201(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}", + **self.url_parameters + ) + + @property + def method(self): + return "PUT" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "environmentId", 
self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2025-08-18-preview", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + typ=AAZObjectType, + typ_kwargs={"flags": {"client_flatten": True}} + ) + _builder.set_prop("kind", AAZStrType, ".kind") + _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}}) + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("metadata", AAZObjectType, ".metadata") + properties.set_prop("streamGovernanceConfig", AAZObjectType, ".stream_governance_config") + + metadata = _builder.get(".properties.metadata") + if metadata is not None: + metadata.set_prop("createdTimestamp", AAZStrType, ".created_timestamp") + metadata.set_prop("deletedTimestamp", AAZStrType, ".deleted_timestamp") + metadata.set_prop("resourceName", AAZStrType, ".resource_name") + metadata.set_prop("self", AAZStrType, ".self") + metadata.set_prop("updatedTimestamp", AAZStrType, ".updated_timestamp") + + stream_governance_config = _builder.get(".properties.streamGovernanceConfig") + if stream_governance_config is not None: + stream_governance_config.set_prop("package", AAZStrType, ".package") + + return 
self.serialize_content(_content_value) + + def on_200_201(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200_201 + ) + + _schema_on_200_201 = None + + @classmethod + def _build_schema_on_200_201(cls): + if cls._schema_on_200_201 is not None: + return cls._schema_on_200_201 + + cls._schema_on_200_201 = AAZObjectType() + + _schema_on_200_201 = cls._schema_on_200_201 + _schema_on_200_201.id = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200_201.kind = AAZStrType() + _schema_on_200_201.name = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200_201.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _schema_on_200_201.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + _schema_on_200_201.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200_201.properties + properties.metadata = AAZObjectType() + properties.stream_governance_config = AAZObjectType( + serialized_name="streamGovernanceConfig", + ) + + metadata = cls._schema_on_200_201.properties.metadata + metadata.created_timestamp = AAZStrType( + serialized_name="createdTimestamp", + ) + metadata.deleted_timestamp = AAZStrType( + serialized_name="deletedTimestamp", + ) + metadata.resource_name = AAZStrType( + serialized_name="resourceName", + ) + metadata.self = AAZStrType() + metadata.updated_timestamp = AAZStrType( + serialized_name="updatedTimestamp", + ) + + stream_governance_config = cls._schema_on_200_201.properties.stream_governance_config + stream_governance_config.package = AAZStrType() + + system_data = cls._schema_on_200_201.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = 
AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + return cls._schema_on_200_201 + + +class _CreateHelper: + """Helper class for Create""" + + +__all__ = ["Create"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_delete.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_delete.py new file mode 100644 index 00000000000..5b0a0c62d7e --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_delete.py @@ -0,0 +1,171 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment delete", + is_preview=True, + confirmation="Are you sure you want to perform this operation?", +) +class Delete(AAZCommand): + """Delete confluent environment by id + """ + + _aaz_info = { + "version": "2025-08-18-preview", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}", "2025-08-18-preview"], + ] + } + + AZ_SUPPORT_NO_WAIT = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_lro_poller(self._execute_operations, None) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = 
super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.environment_id = AAZStrArg( + options=["-n", "--name", "--environment-id"], + help="Confluent environment id", + required=True, + id_part="child_name_1", + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + yield self.EnvironmentDelete(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + class EnvironmentDelete(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [202]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + if session.http_response.status_code in [204]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_204, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + if session.http_response.status_code in [200, 201]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}", + **self.url_parameters + ) + + @property + def method(self): + return "DELETE" + + @property + def error_format(self): + return "MgmtErrorFormat" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2025-08-18-preview", + required=True, + ), + } + return parameters + + def on_204(self, session): + pass + + def on_200_201(self, session): + pass + + +class _DeleteHelper: + """Helper class for Delete""" + + +__all__ = ["Delete"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_update.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_update.py new file mode 100644 index 00000000000..1353c8f82f1 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/_update.py @@ -0,0 +1,472 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment update", + is_preview=True, +) +class Update(AAZCommand): + """Update confluent environment + """ + + _aaz_info = { + "version": "2025-08-18-preview", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}", "2025-08-18-preview"], + ] + } + + AZ_SUPPORT_GENERIC_UPDATE = True + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.environment_id = AAZStrArg( + options=["-n", "--name", "--environment-id"], + help="Confluent environment id", + required=True, + id_part="child_name_1", + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + + # define Arg Group "Body" + + _args_schema = cls._args_schema + _args_schema.kind = AAZStrArg( + options=["--kind"], + arg_group="Body", + help="Type of environment", + nullable=True, + ) + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.metadata = AAZObjectArg( + options=["--metadata"], + arg_group="Properties", + help="Metadata of the record", + nullable=True, + ) + _args_schema.stream_governance_config = AAZObjectArg( + options=["--stream-governance-config"], + arg_group="Properties", + help="Stream 
governance configuration", + nullable=True, + ) + + metadata = cls._args_schema.metadata + metadata.created_timestamp = AAZStrArg( + options=["created-timestamp"], + help="Created Date Time", + nullable=True, + ) + metadata.deleted_timestamp = AAZStrArg( + options=["deleted-timestamp"], + help="Deleted Date time", + nullable=True, + ) + metadata.resource_name = AAZStrArg( + options=["resource-name"], + help="Resource name of the record", + nullable=True, + ) + metadata.self = AAZStrArg( + options=["self"], + help="Self lookup url", + nullable=True, + ) + metadata.updated_timestamp = AAZStrArg( + options=["updated-timestamp"], + help="Updated Date time", + nullable=True, + ) + + stream_governance_config = cls._args_schema.stream_governance_config + stream_governance_config.package = AAZStrArg( + options=["package"], + help="Stream governance configuration", + nullable=True, + enum={"ADVANCED": "ADVANCED", "ESSENTIALS": "ESSENTIALS"}, + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.OrganizationGetEnvironmentById(ctx=self.ctx)() + self.pre_instance_update(self.ctx.vars.instance) + self.InstanceUpdateByJson(ctx=self.ctx)() + self.InstanceUpdateByGeneric(ctx=self.ctx)() + self.post_instance_update(self.ctx.vars.instance) + self.EnvironmentCreateOrUpdate(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + @register_callback + def pre_instance_update(self, instance): + pass + + @register_callback + def post_instance_update(self, instance): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class OrganizationGetEnvironmentById(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + 
if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2025-08-18-preview", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + _UpdateHelper._build_schema_sc_environment_record_read(cls._schema_on_200) + + return cls._schema_on_200 + + class EnvironmentCreateOrUpdate(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, 
stream=False, **kwargs) + if session.http_response.status_code in [200, 201]: + return self.on_200_201(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}", + **self.url_parameters + ) + + @property + def method(self): + return "PUT" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2025-08-18-preview", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + value=self.ctx.vars.instance, + ) + + return self.serialize_content(_content_value) + + def on_200_201(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200_201 + ) + + _schema_on_200_201 = None + + @classmethod + def _build_schema_on_200_201(cls): + if cls._schema_on_200_201 is not None: + return cls._schema_on_200_201 + + cls._schema_on_200_201 = 
AAZObjectType() + _UpdateHelper._build_schema_sc_environment_record_read(cls._schema_on_200_201) + + return cls._schema_on_200_201 + + class InstanceUpdateByJson(AAZJsonInstanceUpdateOperation): + + def __call__(self, *args, **kwargs): + self._update_instance(self.ctx.vars.instance) + + def _update_instance(self, instance): + _instance_value, _builder = self.new_content_builder( + self.ctx.args, + value=instance, + typ=AAZObjectType + ) + _builder.set_prop("kind", AAZStrType, ".kind") + _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}}) + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("metadata", AAZObjectType, ".metadata") + properties.set_prop("streamGovernanceConfig", AAZObjectType, ".stream_governance_config") + + metadata = _builder.get(".properties.metadata") + if metadata is not None: + metadata.set_prop("createdTimestamp", AAZStrType, ".created_timestamp") + metadata.set_prop("deletedTimestamp", AAZStrType, ".deleted_timestamp") + metadata.set_prop("resourceName", AAZStrType, ".resource_name") + metadata.set_prop("self", AAZStrType, ".self") + metadata.set_prop("updatedTimestamp", AAZStrType, ".updated_timestamp") + + stream_governance_config = _builder.get(".properties.streamGovernanceConfig") + if stream_governance_config is not None: + stream_governance_config.set_prop("package", AAZStrType, ".package") + + return _instance_value + + class InstanceUpdateByGeneric(AAZGenericInstanceUpdateOperation): + + def __call__(self, *args, **kwargs): + self._update_instance_by_generic( + self.ctx.vars.instance, + self.ctx.generic_update_args + ) + + +class _UpdateHelper: + """Helper class for Update""" + + _schema_sc_environment_record_read = None + + @classmethod + def _build_schema_sc_environment_record_read(cls, _schema): + if cls._schema_sc_environment_record_read is not None: + _schema.id = cls._schema_sc_environment_record_read.id + _schema.kind = 
cls._schema_sc_environment_record_read.kind + _schema.name = cls._schema_sc_environment_record_read.name + _schema.properties = cls._schema_sc_environment_record_read.properties + _schema.system_data = cls._schema_sc_environment_record_read.system_data + _schema.type = cls._schema_sc_environment_record_read.type + return + + cls._schema_sc_environment_record_read = _schema_sc_environment_record_read = AAZObjectType() + + sc_environment_record_read = _schema_sc_environment_record_read + sc_environment_record_read.id = AAZStrType( + flags={"read_only": True}, + ) + sc_environment_record_read.kind = AAZStrType() + sc_environment_record_read.name = AAZStrType( + flags={"read_only": True}, + ) + sc_environment_record_read.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + sc_environment_record_read.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + sc_environment_record_read.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = _schema_sc_environment_record_read.properties + properties.metadata = AAZObjectType() + properties.stream_governance_config = AAZObjectType( + serialized_name="streamGovernanceConfig", + ) + + metadata = _schema_sc_environment_record_read.properties.metadata + metadata.created_timestamp = AAZStrType( + serialized_name="createdTimestamp", + ) + metadata.deleted_timestamp = AAZStrType( + serialized_name="deletedTimestamp", + ) + metadata.resource_name = AAZStrType( + serialized_name="resourceName", + ) + metadata.self = AAZStrType() + metadata.updated_timestamp = AAZStrType( + serialized_name="updatedTimestamp", + ) + + stream_governance_config = _schema_sc_environment_record_read.properties.stream_governance_config + stream_governance_config.package = AAZStrType() + + system_data = _schema_sc_environment_record_read.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + 
) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + _schema.id = cls._schema_sc_environment_record_read.id + _schema.kind = cls._schema_sc_environment_record_read.kind + _schema.name = cls._schema_sc_environment_record_read.name + _schema.properties = cls._schema_sc_environment_record_read.properties + _schema.system_data = cls._schema_sc_environment_record_read.system_data + _schema.type = cls._schema_sc_environment_record_read.type + + +__all__ = ["Update"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/__init__.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/__init__.py index e02e05398d8..be302b0b95c 100644 --- a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/__init__.py +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/__init__.py @@ -9,6 +9,9 @@ # flake8: noqa from .__cmd_group import * +from ._create import * from ._create_api_key import * +from ._delete import * from ._list import * from ._show import * +from ._update import * diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_create.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_create.py new file mode 100644 index 00000000000..80a4afd6969 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_create.py @@ -0,0 +1,541 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster create", + is_preview=True, +) +class Create(AAZCommand): + """Create confluent clusters + """ + + _aaz_info = { + "version": "2025-08-18-preview", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}", "2025-08-18-preview"], + ] + } + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["-n", "--name", "--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + + # define Arg Group "Body" + + _args_schema = cls._args_schema + _args_schema.kind = AAZStrArg( + options=["--kind"], + arg_group="Body", + help="Type of cluster", + ) + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.metadata = AAZObjectArg( + options=["--metadata"], + arg_group="Properties", + help="Metadata of the record", + ) + _args_schema.spec = AAZObjectArg( + 
options=["--spec"], + arg_group="Properties", + help="Specification of the cluster", + ) + _args_schema.status = AAZObjectArg( + options=["--status"], + arg_group="Properties", + help="Specification of the cluster status", + ) + + metadata = cls._args_schema.metadata + metadata.created_timestamp = AAZStrArg( + options=["created-timestamp"], + help="Created Date Time", + ) + metadata.deleted_timestamp = AAZStrArg( + options=["deleted-timestamp"], + help="Deleted Date time", + ) + metadata.resource_name = AAZStrArg( + options=["resource-name"], + help="Resource name of the record", + ) + metadata.self = AAZStrArg( + options=["self"], + help="Self lookup url", + ) + metadata.updated_timestamp = AAZStrArg( + options=["updated-timestamp"], + help="Updated Date time", + ) + + spec = cls._args_schema.spec + spec.api_endpoint = AAZStrArg( + options=["api-endpoint"], + help="The Kafka API cluster endpoint", + ) + spec.availability = AAZStrArg( + options=["availability"], + help="The availability zone configuration of the cluster", + ) + spec.byok = AAZObjectArg( + options=["byok"], + help="Specification of the cluster byok", + ) + spec.cloud = AAZStrArg( + options=["cloud"], + help="The cloud service provider", + ) + spec.config = AAZObjectArg( + options=["config"], + help="Specification of the cluster configuration", + ) + spec.environment = AAZObjectArg( + options=["environment"], + help="Specification of the cluster environment", + ) + cls._build_args_sc_cluster_network_environment_entity_create(spec.environment) + spec.http_endpoint = AAZStrArg( + options=["http-endpoint"], + help="The cluster HTTP request URL.", + ) + spec.kafka_bootstrap_endpoint = AAZStrArg( + options=["kafka-bootstrap-endpoint"], + help="The bootstrap endpoint used by Kafka clients to connect to the cluster", + ) + spec.name = AAZStrArg( + options=["name"], + help="The name of the cluster", + ) + spec.network = AAZObjectArg( + options=["network"], + help="Specification of the cluster network", + ) + 
cls._build_args_sc_cluster_network_environment_entity_create(spec.network) + spec.package = AAZStrArg( + options=["package"], + help="Stream governance configuration", + enum={"ADVANCED": "ADVANCED", "ESSENTIALS": "ESSENTIALS"}, + ) + spec.region = AAZStrArg( + options=["region"], + help="The cloud service provider region", + ) + spec.zone = AAZStrArg( + options=["zone"], + help="type of zone availability", + ) + + byok = cls._args_schema.spec.byok + byok.id = AAZStrArg( + options=["id"], + help="ID of the referred resource", + ) + byok.related = AAZStrArg( + options=["related"], + help="API URL for accessing or modifying the referred object", + ) + byok.resource_name = AAZStrArg( + options=["resource-name"], + help="CRN reference to the referred resource", + ) + + config = cls._args_schema.spec.config + config.kind = AAZStrArg( + options=["kind"], + help="The lifecycle phase of the cluster", + ) + + status = cls._args_schema.status + status.cku = AAZIntArg( + options=["cku"], + help="The number of Confluent Kafka Units", + ) + status.phase = AAZStrArg( + options=["phase"], + help="The lifecycle phase of the cluster", + ) + return cls._args_schema + + _args_sc_cluster_network_environment_entity_create = None + + @classmethod + def _build_args_sc_cluster_network_environment_entity_create(cls, _schema): + if cls._args_sc_cluster_network_environment_entity_create is not None: + _schema.environment = cls._args_sc_cluster_network_environment_entity_create.environment + _schema.id = cls._args_sc_cluster_network_environment_entity_create.id + _schema.related = cls._args_sc_cluster_network_environment_entity_create.related + _schema.resource_name = cls._args_sc_cluster_network_environment_entity_create.resource_name + return + + cls._args_sc_cluster_network_environment_entity_create = AAZObjectArg() + + sc_cluster_network_environment_entity_create = cls._args_sc_cluster_network_environment_entity_create + sc_cluster_network_environment_entity_create.environment = 
AAZStrArg( + options=["environment"], + help="Environment of the referred resource", + ) + sc_cluster_network_environment_entity_create.id = AAZStrArg( + options=["id"], + help="ID of the referred resource", + ) + sc_cluster_network_environment_entity_create.related = AAZStrArg( + options=["related"], + help="API URL for accessing or modifying the referred object", + ) + sc_cluster_network_environment_entity_create.resource_name = AAZStrArg( + options=["resource-name"], + help="CRN reference to the referred resource", + ) + + _schema.environment = cls._args_sc_cluster_network_environment_entity_create.environment + _schema.id = cls._args_sc_cluster_network_environment_entity_create.id + _schema.related = cls._args_sc_cluster_network_environment_entity_create.related + _schema.resource_name = cls._args_sc_cluster_network_environment_entity_create.resource_name + + def _execute_operations(self): + self.pre_operations() + self.ClusterCreateOrUpdate(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class ClusterCreateOrUpdate(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200, 201]: + return self.on_200_201(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}", + **self.url_parameters + ) + + @property + def method(self): + return "PUT" + + @property + def error_format(self): + return 
"ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2025-08-18-preview", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + typ=AAZObjectType, + typ_kwargs={"flags": {"client_flatten": True}} + ) + _builder.set_prop("kind", AAZStrType, ".kind") + _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}}) + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("metadata", AAZObjectType, ".metadata") + properties.set_prop("spec", AAZObjectType, ".spec") + properties.set_prop("status", AAZObjectType, ".status") + + metadata = _builder.get(".properties.metadata") + if metadata is not None: + metadata.set_prop("createdTimestamp", AAZStrType, ".created_timestamp") + metadata.set_prop("deletedTimestamp", AAZStrType, ".deleted_timestamp") + metadata.set_prop("resourceName", AAZStrType, ".resource_name") + metadata.set_prop("self", AAZStrType, ".self") + metadata.set_prop("updatedTimestamp", AAZStrType, 
".updated_timestamp") + + spec = _builder.get(".properties.spec") + if spec is not None: + spec.set_prop("apiEndpoint", AAZStrType, ".api_endpoint") + spec.set_prop("availability", AAZStrType, ".availability") + spec.set_prop("byok", AAZObjectType, ".byok") + spec.set_prop("cloud", AAZStrType, ".cloud") + spec.set_prop("config", AAZObjectType, ".config") + _CreateHelper._build_schema_sc_cluster_network_environment_entity_create(spec.set_prop("environment", AAZObjectType, ".environment")) + spec.set_prop("httpEndpoint", AAZStrType, ".http_endpoint") + spec.set_prop("kafkaBootstrapEndpoint", AAZStrType, ".kafka_bootstrap_endpoint") + spec.set_prop("name", AAZStrType, ".name") + _CreateHelper._build_schema_sc_cluster_network_environment_entity_create(spec.set_prop("network", AAZObjectType, ".network")) + spec.set_prop("package", AAZStrType, ".package") + spec.set_prop("region", AAZStrType, ".region") + spec.set_prop("zone", AAZStrType, ".zone") + + byok = _builder.get(".properties.spec.byok") + if byok is not None: + byok.set_prop("id", AAZStrType, ".id") + byok.set_prop("related", AAZStrType, ".related") + byok.set_prop("resourceName", AAZStrType, ".resource_name") + + config = _builder.get(".properties.spec.config") + if config is not None: + config.set_prop("kind", AAZStrType, ".kind") + + status = _builder.get(".properties.status") + if status is not None: + status.set_prop("cku", AAZIntType, ".cku") + status.set_prop("phase", AAZStrType, ".phase") + + return self.serialize_content(_content_value) + + def on_200_201(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200_201 + ) + + _schema_on_200_201 = None + + @classmethod + def _build_schema_on_200_201(cls): + if cls._schema_on_200_201 is not None: + return cls._schema_on_200_201 + + cls._schema_on_200_201 = AAZObjectType() + + _schema_on_200_201 = cls._schema_on_200_201 + _schema_on_200_201.id = AAZStrType( + 
flags={"read_only": True}, + ) + _schema_on_200_201.kind = AAZStrType() + _schema_on_200_201.name = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200_201.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _schema_on_200_201.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + _schema_on_200_201.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200_201.properties + properties.metadata = AAZObjectType() + properties.spec = AAZObjectType() + properties.status = AAZObjectType() + + metadata = cls._schema_on_200_201.properties.metadata + metadata.created_timestamp = AAZStrType( + serialized_name="createdTimestamp", + ) + metadata.deleted_timestamp = AAZStrType( + serialized_name="deletedTimestamp", + ) + metadata.resource_name = AAZStrType( + serialized_name="resourceName", + ) + metadata.self = AAZStrType() + metadata.updated_timestamp = AAZStrType( + serialized_name="updatedTimestamp", + ) + + spec = cls._schema_on_200_201.properties.spec + spec.api_endpoint = AAZStrType( + serialized_name="apiEndpoint", + ) + spec.availability = AAZStrType() + spec.byok = AAZObjectType() + spec.cloud = AAZStrType() + spec.config = AAZObjectType() + spec.environment = AAZObjectType() + _CreateHelper._build_schema_sc_cluster_network_environment_entity_read(spec.environment) + spec.http_endpoint = AAZStrType( + serialized_name="httpEndpoint", + ) + spec.kafka_bootstrap_endpoint = AAZStrType( + serialized_name="kafkaBootstrapEndpoint", + ) + spec.name = AAZStrType() + spec.network = AAZObjectType() + _CreateHelper._build_schema_sc_cluster_network_environment_entity_read(spec.network) + spec.package = AAZStrType() + spec.region = AAZStrType() + spec.zone = AAZStrType() + + byok = cls._schema_on_200_201.properties.spec.byok + byok.id = AAZStrType() + byok.related = AAZStrType() + byok.resource_name = AAZStrType( + serialized_name="resourceName", + ) + + config = 
cls._schema_on_200_201.properties.spec.config + config.kind = AAZStrType() + + status = cls._schema_on_200_201.properties.status + status.cku = AAZIntType() + status.phase = AAZStrType() + + system_data = cls._schema_on_200_201.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + return cls._schema_on_200_201 + + +class _CreateHelper: + """Helper class for Create""" + + @classmethod + def _build_schema_sc_cluster_network_environment_entity_create(cls, _builder): + if _builder is None: + return + _builder.set_prop("environment", AAZStrType, ".environment") + _builder.set_prop("id", AAZStrType, ".id") + _builder.set_prop("related", AAZStrType, ".related") + _builder.set_prop("resourceName", AAZStrType, ".resource_name") + + _schema_sc_cluster_network_environment_entity_read = None + + @classmethod + def _build_schema_sc_cluster_network_environment_entity_read(cls, _schema): + if cls._schema_sc_cluster_network_environment_entity_read is not None: + _schema.environment = cls._schema_sc_cluster_network_environment_entity_read.environment + _schema.id = cls._schema_sc_cluster_network_environment_entity_read.id + _schema.related = cls._schema_sc_cluster_network_environment_entity_read.related + _schema.resource_name = cls._schema_sc_cluster_network_environment_entity_read.resource_name + return + + cls._schema_sc_cluster_network_environment_entity_read = _schema_sc_cluster_network_environment_entity_read = AAZObjectType() + + sc_cluster_network_environment_entity_read = _schema_sc_cluster_network_environment_entity_read + 
sc_cluster_network_environment_entity_read.environment = AAZStrType() + sc_cluster_network_environment_entity_read.id = AAZStrType() + sc_cluster_network_environment_entity_read.related = AAZStrType() + sc_cluster_network_environment_entity_read.resource_name = AAZStrType( + serialized_name="resourceName", + ) + + _schema.environment = cls._schema_sc_cluster_network_environment_entity_read.environment + _schema.id = cls._schema_sc_cluster_network_environment_entity_read.id + _schema.related = cls._schema_sc_cluster_network_environment_entity_read.related + _schema.resource_name = cls._schema_sc_cluster_network_environment_entity_read.resource_name + + +__all__ = ["Create"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_delete.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_delete.py new file mode 100644 index 00000000000..a7ab9500dbf --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_delete.py @@ -0,0 +1,181 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster delete", + is_preview=True, + confirmation="Are you sure you want to perform this operation?", +) +class Delete(AAZCommand): + """Delete confluent cluster by id + """ + + _aaz_info = { + "version": "2025-08-18-preview", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}", "2025-08-18-preview"], + ] + } + + AZ_SUPPORT_NO_WAIT = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_lro_poller(self._execute_operations, None) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["-n", "--name", "--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + id_part="child_name_2", + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + id_part="child_name_1", + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + yield self.ClusterDelete(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + class 
ClusterDelete(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [202]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + if session.http_response.status_code in [204]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_204, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + if session.http_response.status_code in [200, 201]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}", + **self.url_parameters + ) + + @property + def method(self): + return "DELETE" + + @property + def error_format(self): + return "MgmtErrorFormat" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + 
return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2025-08-18-preview", + required=True, + ), + } + return parameters + + def on_204(self, session): + pass + + def on_200_201(self, session): + pass + + +class _DeleteHelper: + """Helper class for Delete""" + + +__all__ = ["Delete"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_update.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_update.py new file mode 100644 index 00000000000..1a142b5ecbd --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/_update.py @@ -0,0 +1,728 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster update", + is_preview=True, +) +class Update(AAZCommand): + """Update confluent clusters + """ + + _aaz_info = { + "version": "2025-08-18-preview", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}", "2025-08-18-preview"], + ] + } + + AZ_SUPPORT_GENERIC_UPDATE = True + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["-n", "--name", "--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + id_part="child_name_2", + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + id_part="child_name_1", + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + + # define Arg Group "Body" + + _args_schema = cls._args_schema + _args_schema.kind = AAZStrArg( + options=["--kind"], + arg_group="Body", + help="Type of cluster", + nullable=True, + ) + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.metadata = AAZObjectArg( + options=["--metadata"], + arg_group="Properties", + 
help="Metadata of the record", + nullable=True, + ) + _args_schema.spec = AAZObjectArg( + options=["--spec"], + arg_group="Properties", + help="Specification of the cluster", + nullable=True, + ) + _args_schema.status = AAZObjectArg( + options=["--status"], + arg_group="Properties", + help="Specification of the cluster status", + nullable=True, + ) + + metadata = cls._args_schema.metadata + metadata.created_timestamp = AAZStrArg( + options=["created-timestamp"], + help="Created Date Time", + nullable=True, + ) + metadata.deleted_timestamp = AAZStrArg( + options=["deleted-timestamp"], + help="Deleted Date time", + nullable=True, + ) + metadata.resource_name = AAZStrArg( + options=["resource-name"], + help="Resource name of the record", + nullable=True, + ) + metadata.self = AAZStrArg( + options=["self"], + help="Self lookup url", + nullable=True, + ) + metadata.updated_timestamp = AAZStrArg( + options=["updated-timestamp"], + help="Updated Date time", + nullable=True, + ) + + spec = cls._args_schema.spec + spec.api_endpoint = AAZStrArg( + options=["api-endpoint"], + help="The Kafka API cluster endpoint", + nullable=True, + ) + spec.availability = AAZStrArg( + options=["availability"], + help="The availability zone configuration of the cluster", + nullable=True, + ) + spec.byok = AAZObjectArg( + options=["byok"], + help="Specification of the cluster byok", + nullable=True, + ) + spec.cloud = AAZStrArg( + options=["cloud"], + help="The cloud service provider", + nullable=True, + ) + spec.config = AAZObjectArg( + options=["config"], + help="Specification of the cluster configuration", + nullable=True, + ) + spec.environment = AAZObjectArg( + options=["environment"], + help="Specification of the cluster environment", + nullable=True, + ) + cls._build_args_sc_cluster_network_environment_entity_update(spec.environment) + spec.http_endpoint = AAZStrArg( + options=["http-endpoint"], + help="The cluster HTTP request URL.", + nullable=True, + ) + spec.kafka_bootstrap_endpoint 
= AAZStrArg( + options=["kafka-bootstrap-endpoint"], + help="The bootstrap endpoint used by Kafka clients to connect to the cluster", + nullable=True, + ) + spec.name = AAZStrArg( + options=["name"], + help="The name of the cluster", + nullable=True, + ) + spec.network = AAZObjectArg( + options=["network"], + help="Specification of the cluster network", + nullable=True, + ) + cls._build_args_sc_cluster_network_environment_entity_update(spec.network) + spec.package = AAZStrArg( + options=["package"], + help="Stream governance configuration", + nullable=True, + enum={"ADVANCED": "ADVANCED", "ESSENTIALS": "ESSENTIALS"}, + ) + spec.region = AAZStrArg( + options=["region"], + help="The cloud service provider region", + nullable=True, + ) + spec.zone = AAZStrArg( + options=["zone"], + help="type of zone availability", + nullable=True, + ) + + byok = cls._args_schema.spec.byok + byok.id = AAZStrArg( + options=["id"], + help="ID of the referred resource", + nullable=True, + ) + byok.related = AAZStrArg( + options=["related"], + help="API URL for accessing or modifying the referred object", + nullable=True, + ) + byok.resource_name = AAZStrArg( + options=["resource-name"], + help="CRN reference to the referred resource", + nullable=True, + ) + + config = cls._args_schema.spec.config + config.kind = AAZStrArg( + options=["kind"], + help="The lifecycle phase of the cluster", + nullable=True, + ) + + status = cls._args_schema.status + status.cku = AAZIntArg( + options=["cku"], + help="The number of Confluent Kafka Units", + nullable=True, + ) + status.phase = AAZStrArg( + options=["phase"], + help="The lifecycle phase of the cluster", + nullable=True, + ) + return cls._args_schema + + _args_sc_cluster_network_environment_entity_update = None + + @classmethod + def _build_args_sc_cluster_network_environment_entity_update(cls, _schema): + if cls._args_sc_cluster_network_environment_entity_update is not None: + _schema.environment = 
cls._args_sc_cluster_network_environment_entity_update.environment + _schema.id = cls._args_sc_cluster_network_environment_entity_update.id + _schema.related = cls._args_sc_cluster_network_environment_entity_update.related + _schema.resource_name = cls._args_sc_cluster_network_environment_entity_update.resource_name + return + + cls._args_sc_cluster_network_environment_entity_update = AAZObjectArg( + nullable=True, + ) + + sc_cluster_network_environment_entity_update = cls._args_sc_cluster_network_environment_entity_update + sc_cluster_network_environment_entity_update.environment = AAZStrArg( + options=["environment"], + help="Environment of the referred resource", + nullable=True, + ) + sc_cluster_network_environment_entity_update.id = AAZStrArg( + options=["id"], + help="ID of the referred resource", + nullable=True, + ) + sc_cluster_network_environment_entity_update.related = AAZStrArg( + options=["related"], + help="API URL for accessing or modifying the referred object", + nullable=True, + ) + sc_cluster_network_environment_entity_update.resource_name = AAZStrArg( + options=["resource-name"], + help="CRN reference to the referred resource", + nullable=True, + ) + + _schema.environment = cls._args_sc_cluster_network_environment_entity_update.environment + _schema.id = cls._args_sc_cluster_network_environment_entity_update.id + _schema.related = cls._args_sc_cluster_network_environment_entity_update.related + _schema.resource_name = cls._args_sc_cluster_network_environment_entity_update.resource_name + + def _execute_operations(self): + self.pre_operations() + self.OrganizationGetClusterById(ctx=self.ctx)() + self.pre_instance_update(self.ctx.vars.instance) + self.InstanceUpdateByJson(ctx=self.ctx)() + self.InstanceUpdateByGeneric(ctx=self.ctx)() + self.post_instance_update(self.ctx.vars.instance) + self.ClusterCreateOrUpdate(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def 
post_operations(self): + pass + + @register_callback + def pre_instance_update(self, instance): + pass + + @register_callback + def post_instance_update(self, instance): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class OrganizationGetClusterById(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2025-08-18-preview", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def 
on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + _UpdateHelper._build_schema_sc_cluster_record_read(cls._schema_on_200) + + return cls._schema_on_200 + + class ClusterCreateOrUpdate(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200, 201]: + return self.on_200_201(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}", + **self.url_parameters + ) + + @property + def method(self): + return "PUT" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2025-08-18-preview", + required=True, + ), + } + return parameters + + @property + def 
header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + value=self.ctx.vars.instance, + ) + + return self.serialize_content(_content_value) + + def on_200_201(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200_201 + ) + + _schema_on_200_201 = None + + @classmethod + def _build_schema_on_200_201(cls): + if cls._schema_on_200_201 is not None: + return cls._schema_on_200_201 + + cls._schema_on_200_201 = AAZObjectType() + _UpdateHelper._build_schema_sc_cluster_record_read(cls._schema_on_200_201) + + return cls._schema_on_200_201 + + class InstanceUpdateByJson(AAZJsonInstanceUpdateOperation): + + def __call__(self, *args, **kwargs): + self._update_instance(self.ctx.vars.instance) + + def _update_instance(self, instance): + _instance_value, _builder = self.new_content_builder( + self.ctx.args, + value=instance, + typ=AAZObjectType + ) + _builder.set_prop("kind", AAZStrType, ".kind") + _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}}) + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("metadata", AAZObjectType, ".metadata") + properties.set_prop("spec", AAZObjectType, ".spec") + properties.set_prop("status", AAZObjectType, ".status") + + metadata = _builder.get(".properties.metadata") + if metadata is not None: + metadata.set_prop("createdTimestamp", AAZStrType, ".created_timestamp") + metadata.set_prop("deletedTimestamp", AAZStrType, ".deleted_timestamp") + metadata.set_prop("resourceName", AAZStrType, ".resource_name") + metadata.set_prop("self", AAZStrType, ".self") + metadata.set_prop("updatedTimestamp", AAZStrType, 
".updated_timestamp") + + spec = _builder.get(".properties.spec") + if spec is not None: + spec.set_prop("apiEndpoint", AAZStrType, ".api_endpoint") + spec.set_prop("availability", AAZStrType, ".availability") + spec.set_prop("byok", AAZObjectType, ".byok") + spec.set_prop("cloud", AAZStrType, ".cloud") + spec.set_prop("config", AAZObjectType, ".config") + _UpdateHelper._build_schema_sc_cluster_network_environment_entity_update(spec.set_prop("environment", AAZObjectType, ".environment")) + spec.set_prop("httpEndpoint", AAZStrType, ".http_endpoint") + spec.set_prop("kafkaBootstrapEndpoint", AAZStrType, ".kafka_bootstrap_endpoint") + spec.set_prop("name", AAZStrType, ".name") + _UpdateHelper._build_schema_sc_cluster_network_environment_entity_update(spec.set_prop("network", AAZObjectType, ".network")) + spec.set_prop("package", AAZStrType, ".package") + spec.set_prop("region", AAZStrType, ".region") + spec.set_prop("zone", AAZStrType, ".zone") + + byok = _builder.get(".properties.spec.byok") + if byok is not None: + byok.set_prop("id", AAZStrType, ".id") + byok.set_prop("related", AAZStrType, ".related") + byok.set_prop("resourceName", AAZStrType, ".resource_name") + + config = _builder.get(".properties.spec.config") + if config is not None: + config.set_prop("kind", AAZStrType, ".kind") + + status = _builder.get(".properties.status") + if status is not None: + status.set_prop("cku", AAZIntType, ".cku") + status.set_prop("phase", AAZStrType, ".phase") + + return _instance_value + + class InstanceUpdateByGeneric(AAZGenericInstanceUpdateOperation): + + def __call__(self, *args, **kwargs): + self._update_instance_by_generic( + self.ctx.vars.instance, + self.ctx.generic_update_args + ) + + +class _UpdateHelper: + """Helper class for Update""" + + @classmethod + def _build_schema_sc_cluster_network_environment_entity_update(cls, _builder): + if _builder is None: + return + _builder.set_prop("environment", AAZStrType, ".environment") + _builder.set_prop("id", AAZStrType, 
".id") + _builder.set_prop("related", AAZStrType, ".related") + _builder.set_prop("resourceName", AAZStrType, ".resource_name") + + _schema_sc_cluster_network_environment_entity_read = None + + @classmethod + def _build_schema_sc_cluster_network_environment_entity_read(cls, _schema): + if cls._schema_sc_cluster_network_environment_entity_read is not None: + _schema.environment = cls._schema_sc_cluster_network_environment_entity_read.environment + _schema.id = cls._schema_sc_cluster_network_environment_entity_read.id + _schema.related = cls._schema_sc_cluster_network_environment_entity_read.related + _schema.resource_name = cls._schema_sc_cluster_network_environment_entity_read.resource_name + return + + cls._schema_sc_cluster_network_environment_entity_read = _schema_sc_cluster_network_environment_entity_read = AAZObjectType() + + sc_cluster_network_environment_entity_read = _schema_sc_cluster_network_environment_entity_read + sc_cluster_network_environment_entity_read.environment = AAZStrType() + sc_cluster_network_environment_entity_read.id = AAZStrType() + sc_cluster_network_environment_entity_read.related = AAZStrType() + sc_cluster_network_environment_entity_read.resource_name = AAZStrType( + serialized_name="resourceName", + ) + + _schema.environment = cls._schema_sc_cluster_network_environment_entity_read.environment + _schema.id = cls._schema_sc_cluster_network_environment_entity_read.id + _schema.related = cls._schema_sc_cluster_network_environment_entity_read.related + _schema.resource_name = cls._schema_sc_cluster_network_environment_entity_read.resource_name + + _schema_sc_cluster_record_read = None + + @classmethod + def _build_schema_sc_cluster_record_read(cls, _schema): + if cls._schema_sc_cluster_record_read is not None: + _schema.id = cls._schema_sc_cluster_record_read.id + _schema.kind = cls._schema_sc_cluster_record_read.kind + _schema.name = cls._schema_sc_cluster_record_read.name + _schema.properties = 
cls._schema_sc_cluster_record_read.properties + _schema.system_data = cls._schema_sc_cluster_record_read.system_data + _schema.type = cls._schema_sc_cluster_record_read.type + return + + cls._schema_sc_cluster_record_read = _schema_sc_cluster_record_read = AAZObjectType() + + sc_cluster_record_read = _schema_sc_cluster_record_read + sc_cluster_record_read.id = AAZStrType( + flags={"read_only": True}, + ) + sc_cluster_record_read.kind = AAZStrType() + sc_cluster_record_read.name = AAZStrType( + flags={"read_only": True}, + ) + sc_cluster_record_read.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + sc_cluster_record_read.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + sc_cluster_record_read.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = _schema_sc_cluster_record_read.properties + properties.metadata = AAZObjectType() + properties.spec = AAZObjectType() + properties.status = AAZObjectType() + + metadata = _schema_sc_cluster_record_read.properties.metadata + metadata.created_timestamp = AAZStrType( + serialized_name="createdTimestamp", + ) + metadata.deleted_timestamp = AAZStrType( + serialized_name="deletedTimestamp", + ) + metadata.resource_name = AAZStrType( + serialized_name="resourceName", + ) + metadata.self = AAZStrType() + metadata.updated_timestamp = AAZStrType( + serialized_name="updatedTimestamp", + ) + + spec = _schema_sc_cluster_record_read.properties.spec + spec.api_endpoint = AAZStrType( + serialized_name="apiEndpoint", + ) + spec.availability = AAZStrType() + spec.byok = AAZObjectType() + spec.cloud = AAZStrType() + spec.config = AAZObjectType() + spec.environment = AAZObjectType() + cls._build_schema_sc_cluster_network_environment_entity_read(spec.environment) + spec.http_endpoint = AAZStrType( + serialized_name="httpEndpoint", + ) + spec.kafka_bootstrap_endpoint = AAZStrType( + serialized_name="kafkaBootstrapEndpoint", + ) + spec.name = AAZStrType() + spec.network 
= AAZObjectType() + cls._build_schema_sc_cluster_network_environment_entity_read(spec.network) + spec.package = AAZStrType() + spec.region = AAZStrType() + spec.zone = AAZStrType() + + byok = _schema_sc_cluster_record_read.properties.spec.byok + byok.id = AAZStrType() + byok.related = AAZStrType() + byok.resource_name = AAZStrType( + serialized_name="resourceName", + ) + + config = _schema_sc_cluster_record_read.properties.spec.config + config.kind = AAZStrType() + + status = _schema_sc_cluster_record_read.properties.status + status.cku = AAZIntType() + status.phase = AAZStrType() + + system_data = _schema_sc_cluster_record_read.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + _schema.id = cls._schema_sc_cluster_record_read.id + _schema.kind = cls._schema_sc_cluster_record_read.kind + _schema.name = cls._schema_sc_cluster_record_read.name + _schema.properties = cls._schema_sc_cluster_record_read.properties + _schema.system_data = cls._schema_sc_cluster_record_read.system_data + _schema.type = cls._schema_sc_cluster_record_read.type + + +__all__ = ["Update"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/__cmd_group.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/__cmd_group.py new file mode 100644 index 00000000000..1561045402f --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/__cmd_group.py @@ -0,0 +1,23 @@ +# 
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------

# pylint: skip-file
# flake8: noqa

from azure.cli.core.aaz import *


# Registers the "confluent organization environment cluster connector" command
# group with the CLI command loader. The class docstring below is surfaced
# verbatim as the group's help text in `az ... --help`, so it is runtime
# behavior — do not reword it without a UX review.
# NOTE(review): this file is generated by aaz-dev-tools; prefer regenerating
# over hand-editing.
@register_command_group(
    "confluent organization environment cluster connector",
)
class __CMDGroup(AAZCommandGroup):
    """Manage Connector
    """
    pass


__all__ = ["__CMDGroup"]
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------

# pylint: skip-file
# flake8: noqa

from azure.cli.core.aaz import *


# NOTE(review): generated by aaz-dev-tools from the 2024-07-01 Microsoft.Confluent
# API spec; prefer regenerating over hand-editing. The class docstring is the
# user-visible command help and must not be reworded casually.
@register_command(
    "confluent organization environment cluster connector create",
)
class Create(AAZCommand):
    """Create confluent connector by Name
    """

    # Maps this command to the ARM resource + API version it targets.
    _aaz_info = {
        "version": "2024-07-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/connectors/{}", "2024-07-01"],
        ]
    }

    def _handler(self, command_args):
        # Standard AAZCommand entry point: parse args, run the HTTP operation,
        # then shape the response for output.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    # Cached per-class argument schema; built lazily by _build_arguments_schema.
    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        """Build (once per class) the CLI argument schema for this command.

        Defines the URL/path arguments plus three request-body object groups
        (connector basic info, service-type info, partner connector info).
        """
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        # Path segments of the PUT URL — all required.

        _args_schema = cls._args_schema
        _args_schema.cluster_id = AAZStrArg(
            options=["--cluster-id"],
            help="Confluent kafka or schema registry cluster id",
            required=True,
        )
        _args_schema.connector_name = AAZStrArg(
            options=["-n", "--name", "--connector-name"],
            help="Confluent connector name",
            required=True,
        )
        _args_schema.environment_id = AAZStrArg(
            options=["--environment-id"],
            help="Confluent environment id",
            required=True,
        )
        _args_schema.organization_name = AAZStrArg(
            options=["--organization-name"],
            help="Organization resource name",
            required=True,
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )

        # define Arg Group "Properties"
        # Request-body payload: three optional compound objects.

        _args_schema = cls._args_schema
        _args_schema.connector_basic_info = AAZObjectArg(
            options=["--connector-basic-info"],
            arg_group="Properties",
            help="Connector Info Base",
        )
        _args_schema.connector_service_type_info = AAZObjectArg(
            options=["--connector-service-type-info"],
            arg_group="Properties",
            help="Connector Service type info base properties.",
        )
        _args_schema.partner_connector_info = AAZObjectArg(
            options=["--partner-connector-info"],
            arg_group="Properties",
            help="The connection information consumed by applications.",
        )

        # --connector-basic-info sub-fields.
        connector_basic_info = cls._args_schema.connector_basic_info
        connector_basic_info.connector_class = AAZStrArg(
            options=["connector-class"],
            help="Connector Class",
            enum={"AZUREBLOBSINK": "AZUREBLOBSINK", "AZUREBLOBSOURCE": "AZUREBLOBSOURCE"},
        )
        connector_basic_info.connector_id = AAZStrArg(
            options=["connector-id"],
            help="Connector Id",
        )
        connector_basic_info.connector_name = AAZStrArg(
            options=["connector-name"],
            help="Connector Name",
        )
        connector_basic_info.connector_state = AAZStrArg(
            options=["connector-state"],
            help="Connector Status",
            enum={"FAILED": "FAILED", "PAUSED": "PAUSED", "PROVISIONING": "PROVISIONING", "RUNNING": "RUNNING"},
        )
        connector_basic_info.connector_type = AAZStrArg(
            options=["connector-type"],
            help="Connector Type",
            enum={"SINK": "SINK", "SOURCE": "SOURCE"},
        )

        # --connector-service-type-info: one sub-object per Azure service
        # connector variant (discriminated in the request builder).
        connector_service_type_info = cls._args_schema.connector_service_type_info
        connector_service_type_info.azure_blob_storage_sink_connector = AAZObjectArg(
            options=["azure-blob-storage-sink-connector"],
        )
        connector_service_type_info.azure_blob_storage_source_connector = AAZObjectArg(
            options=["azure-blob-storage-source-connector"],
        )
        connector_service_type_info.azure_cosmos_db_sink_connector = AAZObjectArg(
            options=["azure-cosmos-db-sink-connector"],
        )
        connector_service_type_info.azure_cosmos_db_source_connector = AAZObjectArg(
            options=["azure-cosmos-db-source-connector"],
        )
        connector_service_type_info.azure_synapse_analytics_sink_connector = AAZObjectArg(
            options=["azure-synapse-analytics-sink-connector"],
        )

        azure_blob_storage_sink_connector = cls._args_schema.connector_service_type_info.azure_blob_storage_sink_connector
        azure_blob_storage_sink_connector.storage_account_key = AAZStrArg(
            options=["storage-account-key"],
            help="Azure Blob Storage Account Key",
        )
        azure_blob_storage_sink_connector.storage_account_name = AAZStrArg(
            options=["storage-account-name"],
            help="Azure Blob Storage Account Name",
        )
        azure_blob_storage_sink_connector.storage_container_name = AAZStrArg(
            options=["storage-container-name"],
            help="Azure Blob Storage Account Container Name",
        )

        azure_blob_storage_source_connector = cls._args_schema.connector_service_type_info.azure_blob_storage_source_connector
        azure_blob_storage_source_connector.storage_account_key = AAZStrArg(
            options=["storage-account-key"],
            help="Azure Blob Storage Account Key",
        )
        azure_blob_storage_source_connector.storage_account_name = AAZStrArg(
            options=["storage-account-name"],
            help="Azure Blob Storage Account Name",
        )
        azure_blob_storage_source_connector.storage_container_name = AAZStrArg(
            options=["storage-container-name"],
            help="Azure Blob Storage Account Container Name",
        )

        azure_cosmos_db_sink_connector = cls._args_schema.connector_service_type_info.azure_cosmos_db_sink_connector
        azure_cosmos_db_sink_connector.cosmos_connection_endpoint = AAZStrArg(
            options=["cosmos-connection-endpoint"],
            help="Azure Cosmos Database Connection Endpoint",
        )
        azure_cosmos_db_sink_connector.cosmos_containers_topic_mapping = AAZStrArg(
            options=["cosmos-containers-topic-mapping"],
            help="Azure Cosmos Database Containers Topic Mapping",
        )
        azure_cosmos_db_sink_connector.cosmos_database_name = AAZStrArg(
            options=["cosmos-database-name"],
            help="Azure Cosmos Database Name",
        )
        azure_cosmos_db_sink_connector.cosmos_id_strategy = AAZStrArg(
            options=["cosmos-id-strategy"],
            help="Azure Cosmos Database Id Strategy",
        )
        azure_cosmos_db_sink_connector.cosmos_master_key = AAZStrArg(
            options=["cosmos-master-key"],
            help="Azure Cosmos Database Master Key",
        )

        azure_cosmos_db_source_connector = cls._args_schema.connector_service_type_info.azure_cosmos_db_source_connector
        azure_cosmos_db_source_connector.cosmos_connection_endpoint = AAZStrArg(
            options=["cosmos-connection-endpoint"],
            help="Azure Cosmos Database Connection Endpoint",
        )
        azure_cosmos_db_source_connector.cosmos_containers_topic_mapping = AAZStrArg(
            options=["cosmos-containers-topic-mapping"],
            help="Azure Cosmos Database Containers Topic Mapping",
        )
        azure_cosmos_db_source_connector.cosmos_database_name = AAZStrArg(
            options=["cosmos-database-name"],
            help="Azure Cosmos Database Name",
        )
        azure_cosmos_db_source_connector.cosmos_master_key = AAZStrArg(
            options=["cosmos-master-key"],
            help="Azure Cosmos Database Master Key",
        )
        azure_cosmos_db_source_connector.cosmos_message_key_enabled = AAZBoolArg(
            options=["cosmos-message-key-enabled"],
            help="Azure Cosmos Database Message Key Enabled",
        )
        azure_cosmos_db_source_connector.cosmos_message_key_field = AAZStrArg(
            options=["cosmos-message-key-field"],
            help="Azure Cosmos Database Message Key Field",
        )

        azure_synapse_analytics_sink_connector = cls._args_schema.connector_service_type_info.azure_synapse_analytics_sink_connector
        azure_synapse_analytics_sink_connector.synapse_sql_database_name = AAZStrArg(
            options=["synapse-sql-database-name"],
            help="Azure Synapse Dedicated SQL Pool Database Name",
        )
        azure_synapse_analytics_sink_connector.synapse_sql_password = AAZStrArg(
            options=["synapse-sql-password"],
            help="Azure Synapse SQL login details",
        )
        azure_synapse_analytics_sink_connector.synapse_sql_server_name = AAZStrArg(
            options=["synapse-sql-server-name"],
            help="Azure Synapse Analytics SQL Server Name",
        )
        azure_synapse_analytics_sink_connector.synapse_sql_user = AAZStrArg(
            options=["synapse-sql-user"],
            help="Azure Synapse SQL login details",
        )

        # --partner-connector-info: one sub-object per Kafka-side partner
        # connector variant (discriminated in the request builder).
        partner_connector_info = cls._args_schema.partner_connector_info
        partner_connector_info.kafka_azure_blob_storage_sink = AAZObjectArg(
            options=["kafka-azure-blob-storage-sink"],
        )
        partner_connector_info.kafka_azure_blob_storage_source = AAZObjectArg(
            options=["kafka-azure-blob-storage-source"],
        )
        partner_connector_info.kafka_azure_cosmos_db_sink = AAZObjectArg(
            options=["kafka-azure-cosmos-db-sink"],
        )
        partner_connector_info.kafka_azure_cosmos_db_source = AAZObjectArg(
            options=["kafka-azure-cosmos-db-source"],
        )
        partner_connector_info.kafka_azure_synapse_analytics_sink = AAZObjectArg(
            options=["kafka-azure-synapse-analytics-sink"],
        )

        kafka_azure_blob_storage_sink = cls._args_schema.partner_connector_info.kafka_azure_blob_storage_sink
        kafka_azure_blob_storage_sink.api_key = AAZStrArg(
            options=["api-key"],
            help="Kafka API Key",
        )
        kafka_azure_blob_storage_sink.api_secret = AAZStrArg(
            options=["api-secret"],
            help="Kafka API Key Secret",
        )
        kafka_azure_blob_storage_sink.auth_type = AAZStrArg(
            options=["auth-type"],
            help="Kafka Auth Type",
            enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"},
        )
        kafka_azure_blob_storage_sink.flush_size = AAZStrArg(
            options=["flush-size"],
            help="Flush size",
        )
        kafka_azure_blob_storage_sink.input_format = AAZStrArg(
            options=["input-format"],
            help="Kafka Input Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_blob_storage_sink.max_tasks = AAZStrArg(
            options=["max-tasks"],
            help="Maximum Tasks",
        )
        kafka_azure_blob_storage_sink.output_format = AAZStrArg(
            options=["output-format"],
            help="Kafka Output Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_blob_storage_sink.service_account_id = AAZStrArg(
            options=["service-account-id"],
            help="Kafka Service Account Id",
        )
        kafka_azure_blob_storage_sink.time_interval = AAZStrArg(
            options=["time-interval"],
            help="Time Interval",
        )
        kafka_azure_blob_storage_sink.topics = AAZListArg(
            options=["topics"],
            help="Kafka topics list",
        )
        kafka_azure_blob_storage_sink.topics_dir = AAZStrArg(
            options=["topics-dir"],
            help="Kafka topics directory",
        )

        topics = cls._args_schema.partner_connector_info.kafka_azure_blob_storage_sink.topics
        topics.Element = AAZStrArg()

        kafka_azure_blob_storage_source = cls._args_schema.partner_connector_info.kafka_azure_blob_storage_source
        kafka_azure_blob_storage_source.api_key = AAZStrArg(
            options=["api-key"],
            help="Kafka API Key",
        )
        kafka_azure_blob_storage_source.api_secret = AAZStrArg(
            options=["api-secret"],
            help="Kafka API Secret",
        )
        kafka_azure_blob_storage_source.auth_type = AAZStrArg(
            options=["auth-type"],
            help="Kafka Auth Type",
            enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"},
        )
        kafka_azure_blob_storage_source.input_format = AAZStrArg(
            options=["input-format"],
            help="Kafka Input Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_blob_storage_source.max_tasks = AAZStrArg(
            options=["max-tasks"],
            help="Maximum Tasks",
        )
        kafka_azure_blob_storage_source.output_format = AAZStrArg(
            options=["output-format"],
            help="Kafka Output Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_blob_storage_source.service_account_id = AAZStrArg(
            options=["service-account-id"],
            help="Kafka Service Account Id",
        )
        kafka_azure_blob_storage_source.topic_regex = AAZStrArg(
            options=["topic-regex"],
            help="Kafka topics Regex pattern",
        )
        kafka_azure_blob_storage_source.topics_dir = AAZStrArg(
            options=["topics-dir"],
            help="Kafka topics directory",
        )

        kafka_azure_cosmos_db_sink = cls._args_schema.partner_connector_info.kafka_azure_cosmos_db_sink
        kafka_azure_cosmos_db_sink.api_key = AAZStrArg(
            options=["api-key"],
            help="Kafka API Key",
        )
        kafka_azure_cosmos_db_sink.api_secret = AAZStrArg(
            options=["api-secret"],
            help="Kafka API Key Secret",
        )
        kafka_azure_cosmos_db_sink.auth_type = AAZStrArg(
            options=["auth-type"],
            help="Kafka Auth Type",
            enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"},
        )
        kafka_azure_cosmos_db_sink.flush_size = AAZStrArg(
            options=["flush-size"],
            help="Flush size",
        )
        kafka_azure_cosmos_db_sink.input_format = AAZStrArg(
            options=["input-format"],
            help="Kafka Input Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_cosmos_db_sink.max_tasks = AAZStrArg(
            options=["max-tasks"],
            help="Maximum Tasks",
        )
        kafka_azure_cosmos_db_sink.output_format = AAZStrArg(
            options=["output-format"],
            help="Kafka Output Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_cosmos_db_sink.service_account_id = AAZStrArg(
            options=["service-account-id"],
            help="Kafka Service Account Id",
        )
        kafka_azure_cosmos_db_sink.time_interval = AAZStrArg(
            options=["time-interval"],
            help="Time Interval",
        )
        kafka_azure_cosmos_db_sink.topics = AAZListArg(
            options=["topics"],
            help="Kafka topics list",
        )
        kafka_azure_cosmos_db_sink.topics_dir = AAZStrArg(
            options=["topics-dir"],
            help="Kafka topics directory",
        )

        topics = cls._args_schema.partner_connector_info.kafka_azure_cosmos_db_sink.topics
        topics.Element = AAZStrArg()

        kafka_azure_cosmos_db_source = cls._args_schema.partner_connector_info.kafka_azure_cosmos_db_source
        kafka_azure_cosmos_db_source.api_key = AAZStrArg(
            options=["api-key"],
            help="Kafka API Key",
        )
        kafka_azure_cosmos_db_source.api_secret = AAZStrArg(
            options=["api-secret"],
            help="Kafka API Secret",
        )
        kafka_azure_cosmos_db_source.auth_type = AAZStrArg(
            options=["auth-type"],
            help="Kafka Auth Type",
            enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"},
        )
        kafka_azure_cosmos_db_source.input_format = AAZStrArg(
            options=["input-format"],
            help="Kafka Input Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_cosmos_db_source.max_tasks = AAZStrArg(
            options=["max-tasks"],
            help="Maximum Tasks",
        )
        kafka_azure_cosmos_db_source.output_format = AAZStrArg(
            options=["output-format"],
            help="Kafka Output Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_cosmos_db_source.service_account_id = AAZStrArg(
            options=["service-account-id"],
            help="Kafka Service Account Id",
        )
        kafka_azure_cosmos_db_source.topic_regex = AAZStrArg(
            options=["topic-regex"],
            help="Kafka topics Regex pattern",
        )
        kafka_azure_cosmos_db_source.topics_dir = AAZStrArg(
            options=["topics-dir"],
            help="Kafka topics directory",
        )

        kafka_azure_synapse_analytics_sink = cls._args_schema.partner_connector_info.kafka_azure_synapse_analytics_sink
        kafka_azure_synapse_analytics_sink.api_key = AAZStrArg(
            options=["api-key"],
            help="Kafka API Key",
        )
        kafka_azure_synapse_analytics_sink.api_secret = AAZStrArg(
            options=["api-secret"],
            help="Kafka API Key Secret",
        )
        kafka_azure_synapse_analytics_sink.auth_type = AAZStrArg(
            options=["auth-type"],
            help="Kafka Auth Type",
            enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"},
        )
        kafka_azure_synapse_analytics_sink.flush_size = AAZStrArg(
            options=["flush-size"],
            help="Flush size",
        )
        kafka_azure_synapse_analytics_sink.input_format = AAZStrArg(
            options=["input-format"],
            help="Kafka Input Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_synapse_analytics_sink.max_tasks = AAZStrArg(
            options=["max-tasks"],
            help="Maximum Tasks",
        )
        kafka_azure_synapse_analytics_sink.output_format = AAZStrArg(
            options=["output-format"],
            help="Kafka Output Data Format Type",
            enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"},
        )
        kafka_azure_synapse_analytics_sink.service_account_id = AAZStrArg(
            options=["service-account-id"],
            help="Kafka Service Account Id",
        )
        kafka_azure_synapse_analytics_sink.time_interval = AAZStrArg(
            options=["time-interval"],
            help="Time Interval",
        )
        kafka_azure_synapse_analytics_sink.topics = AAZListArg(
            options=["topics"],
            help="Kafka topics list",
        )
        kafka_azure_synapse_analytics_sink.topics_dir = AAZStrArg(
            options=["topics-dir"],
            help="Kafka topics directory",
        )

        topics = cls._args_schema.partner_connector_info.kafka_azure_synapse_analytics_sink.topics
        topics.Element = AAZStrArg()
        return cls._args_schema

    def _execute_operations(self):
        # Single synchronous PUT round-trip; no long-running-operation polling
        # (the operation returns directly on 200/201 — see __call__ below).
        self.pre_operations()
        self.ConnectorCreateOrUpdate(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        # Extension hook: runs before the HTTP operation. No-op by default.
        pass

    @register_callback
    def post_operations(self):
        # Extension hook: runs after the HTTP operation. No-op by default.
        pass

    def _output(self, *args, **kwargs):
        # Flattens the "properties" bag into the top-level output object.
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class ConnectorCreateOrUpdate(AAZHttpOperation):
        """PUT the connector resource; accepts 200 (updated) or 201 (created)."""
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200, 201]:
                # on_200_201 is defined later in this class (outside this hunk).
                return self.on_200_201(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/connectors/{connectorName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "PUT"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            # All path segments come from the required CLI arguments above,
            # except subscriptionId which comes from the account context.
            parameters = {
                **self.serialize_url_param(
                    "clusterId", self.ctx.args.cluster_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "connectorName", self.ctx.args.connector_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "environmentId", self.ctx.args.environment_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "organizationName", self.ctx.args.organization_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            # Pinned to the API version declared in _aaz_info.
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2024-07-01",
                    required=True,
                ),
            }
            return parameters
@property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + typ=AAZObjectType, + typ_kwargs={"flags": {"client_flatten": True}} + ) + _builder.set_prop("properties", AAZObjectType, ".", typ_kwargs={"flags": {"required": True, "client_flatten": True}}) + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("connectorBasicInfo", AAZObjectType, ".connector_basic_info") + properties.set_prop("connectorServiceTypeInfo", AAZObjectType, ".connector_service_type_info") + properties.set_prop("partnerConnectorInfo", AAZObjectType, ".partner_connector_info") + + connector_basic_info = _builder.get(".properties.connectorBasicInfo") + if connector_basic_info is not None: + connector_basic_info.set_prop("connectorClass", AAZStrType, ".connector_class") + connector_basic_info.set_prop("connectorId", AAZStrType, ".connector_id") + connector_basic_info.set_prop("connectorName", AAZStrType, ".connector_name") + connector_basic_info.set_prop("connectorState", AAZStrType, ".connector_state") + connector_basic_info.set_prop("connectorType", AAZStrType, ".connector_type") + + connector_service_type_info = _builder.get(".properties.connectorServiceTypeInfo") + if connector_service_type_info is not None: + connector_service_type_info.set_const("connectorServiceType", "AzureBlobStorageSinkConnector", AAZStrType, ".azure_blob_storage_sink_connector", typ_kwargs={"flags": {"required": True}}) + connector_service_type_info.set_const("connectorServiceType", "AzureBlobStorageSourceConnector", AAZStrType, ".azure_blob_storage_source_connector", typ_kwargs={"flags": {"required": True}}) + connector_service_type_info.set_const("connectorServiceType", "AzureCosmosDBSinkConnector", 
AAZStrType, ".azure_cosmos_db_sink_connector", typ_kwargs={"flags": {"required": True}}) + connector_service_type_info.set_const("connectorServiceType", "AzureCosmosDBSourceConnector", AAZStrType, ".azure_cosmos_db_source_connector", typ_kwargs={"flags": {"required": True}}) + connector_service_type_info.set_const("connectorServiceType", "AzureSynapseAnalyticsSinkConnector", AAZStrType, ".azure_synapse_analytics_sink_connector", typ_kwargs={"flags": {"required": True}}) + connector_service_type_info.discriminate_by("connectorServiceType", "AzureBlobStorageSinkConnector") + connector_service_type_info.discriminate_by("connectorServiceType", "AzureBlobStorageSourceConnector") + connector_service_type_info.discriminate_by("connectorServiceType", "AzureCosmosDBSinkConnector") + connector_service_type_info.discriminate_by("connectorServiceType", "AzureCosmosDBSourceConnector") + connector_service_type_info.discriminate_by("connectorServiceType", "AzureSynapseAnalyticsSinkConnector") + + disc_azure_blob_storage_sink_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureBlobStorageSinkConnector}") + if disc_azure_blob_storage_sink_connector is not None: + disc_azure_blob_storage_sink_connector.set_prop("storageAccountKey", AAZStrType, ".azure_blob_storage_sink_connector.storage_account_key") + disc_azure_blob_storage_sink_connector.set_prop("storageAccountName", AAZStrType, ".azure_blob_storage_sink_connector.storage_account_name") + disc_azure_blob_storage_sink_connector.set_prop("storageContainerName", AAZStrType, ".azure_blob_storage_sink_connector.storage_container_name") + + disc_azure_blob_storage_source_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureBlobStorageSourceConnector}") + if disc_azure_blob_storage_source_connector is not None: + disc_azure_blob_storage_source_connector.set_prop("storageAccountKey", AAZStrType, ".azure_blob_storage_source_connector.storage_account_key", 
typ_kwargs={"flags": {"secret": True}}) + disc_azure_blob_storage_source_connector.set_prop("storageAccountName", AAZStrType, ".azure_blob_storage_source_connector.storage_account_name") + disc_azure_blob_storage_source_connector.set_prop("storageContainerName", AAZStrType, ".azure_blob_storage_source_connector.storage_container_name") + + disc_azure_cosmos_db_sink_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureCosmosDBSinkConnector}") + if disc_azure_cosmos_db_sink_connector is not None: + disc_azure_cosmos_db_sink_connector.set_prop("cosmosConnectionEndpoint", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_connection_endpoint") + disc_azure_cosmos_db_sink_connector.set_prop("cosmosContainersTopicMapping", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_containers_topic_mapping") + disc_azure_cosmos_db_sink_connector.set_prop("cosmosDatabaseName", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_database_name") + disc_azure_cosmos_db_sink_connector.set_prop("cosmosIdStrategy", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_id_strategy") + disc_azure_cosmos_db_sink_connector.set_prop("cosmosMasterKey", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_master_key") + + disc_azure_cosmos_db_source_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureCosmosDBSourceConnector}") + if disc_azure_cosmos_db_source_connector is not None: + disc_azure_cosmos_db_source_connector.set_prop("cosmosConnectionEndpoint", AAZStrType, ".azure_cosmos_db_source_connector.cosmos_connection_endpoint") + disc_azure_cosmos_db_source_connector.set_prop("cosmosContainersTopicMapping", AAZStrType, ".azure_cosmos_db_source_connector.cosmos_containers_topic_mapping") + disc_azure_cosmos_db_source_connector.set_prop("cosmosDatabaseName", AAZStrType, ".azure_cosmos_db_source_connector.cosmos_database_name") + disc_azure_cosmos_db_source_connector.set_prop("cosmosMasterKey", AAZStrType, 
".azure_cosmos_db_source_connector.cosmos_master_key") + disc_azure_cosmos_db_source_connector.set_prop("cosmosMessageKeyEnabled", AAZBoolType, ".azure_cosmos_db_source_connector.cosmos_message_key_enabled") + disc_azure_cosmos_db_source_connector.set_prop("cosmosMessageKeyField", AAZStrType, ".azure_cosmos_db_source_connector.cosmos_message_key_field") + + disc_azure_synapse_analytics_sink_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureSynapseAnalyticsSinkConnector}") + if disc_azure_synapse_analytics_sink_connector is not None: + disc_azure_synapse_analytics_sink_connector.set_prop("synapseSqlDatabaseName", AAZStrType, ".azure_synapse_analytics_sink_connector.synapse_sql_database_name") + disc_azure_synapse_analytics_sink_connector.set_prop("synapseSqlPassword", AAZStrType, ".azure_synapse_analytics_sink_connector.synapse_sql_password") + disc_azure_synapse_analytics_sink_connector.set_prop("synapseSqlServerName", AAZStrType, ".azure_synapse_analytics_sink_connector.synapse_sql_server_name") + disc_azure_synapse_analytics_sink_connector.set_prop("synapseSqlUser", AAZStrType, ".azure_synapse_analytics_sink_connector.synapse_sql_user") + + partner_connector_info = _builder.get(".properties.partnerConnectorInfo") + if partner_connector_info is not None: + partner_connector_info.set_const("partnerConnectorType", "KafkaAzureBlobStorageSink", AAZStrType, ".kafka_azure_blob_storage_sink", typ_kwargs={"flags": {"required": True}}) + partner_connector_info.set_const("partnerConnectorType", "KafkaAzureBlobStorageSource", AAZStrType, ".kafka_azure_blob_storage_source", typ_kwargs={"flags": {"required": True}}) + partner_connector_info.set_const("partnerConnectorType", "KafkaAzureCosmosDBSink", AAZStrType, ".kafka_azure_cosmos_db_sink", typ_kwargs={"flags": {"required": True}}) + partner_connector_info.set_const("partnerConnectorType", "KafkaAzureCosmosDBSource", AAZStrType, ".kafka_azure_cosmos_db_source", typ_kwargs={"flags": 
{"required": True}}) + partner_connector_info.set_const("partnerConnectorType", "KafkaAzureSynapseAnalyticsSink", AAZStrType, ".kafka_azure_synapse_analytics_sink", typ_kwargs={"flags": {"required": True}}) + partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureBlobStorageSink") + partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureBlobStorageSource") + partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureCosmosDBSink") + partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureCosmosDBSource") + partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureSynapseAnalyticsSink") + + disc_kafka_azure_blob_storage_sink = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureBlobStorageSink}") + if disc_kafka_azure_blob_storage_sink is not None: + disc_kafka_azure_blob_storage_sink.set_prop("apiKey", AAZStrType, ".kafka_azure_blob_storage_sink.api_key") + disc_kafka_azure_blob_storage_sink.set_prop("apiSecret", AAZStrType, ".kafka_azure_blob_storage_sink.api_secret") + disc_kafka_azure_blob_storage_sink.set_prop("authType", AAZStrType, ".kafka_azure_blob_storage_sink.auth_type") + disc_kafka_azure_blob_storage_sink.set_prop("flushSize", AAZStrType, ".kafka_azure_blob_storage_sink.flush_size") + disc_kafka_azure_blob_storage_sink.set_prop("inputFormat", AAZStrType, ".kafka_azure_blob_storage_sink.input_format") + disc_kafka_azure_blob_storage_sink.set_prop("maxTasks", AAZStrType, ".kafka_azure_blob_storage_sink.max_tasks") + disc_kafka_azure_blob_storage_sink.set_prop("outputFormat", AAZStrType, ".kafka_azure_blob_storage_sink.output_format") + disc_kafka_azure_blob_storage_sink.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_blob_storage_sink.service_account_id") + disc_kafka_azure_blob_storage_sink.set_prop("timeInterval", AAZStrType, ".kafka_azure_blob_storage_sink.time_interval") + disc_kafka_azure_blob_storage_sink.set_prop("topics", 
AAZListType, ".kafka_azure_blob_storage_sink.topics") + disc_kafka_azure_blob_storage_sink.set_prop("topicsDir", AAZStrType, ".kafka_azure_blob_storage_sink.topics_dir") + + topics = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureBlobStorageSink}.topics") + if topics is not None: + topics.set_elements(AAZStrType, ".") + + disc_kafka_azure_blob_storage_source = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureBlobStorageSource}") + if disc_kafka_azure_blob_storage_source is not None: + disc_kafka_azure_blob_storage_source.set_prop("apiKey", AAZStrType, ".kafka_azure_blob_storage_source.api_key") + disc_kafka_azure_blob_storage_source.set_prop("apiSecret", AAZStrType, ".kafka_azure_blob_storage_source.api_secret", typ_kwargs={"flags": {"secret": True}}) + disc_kafka_azure_blob_storage_source.set_prop("authType", AAZStrType, ".kafka_azure_blob_storage_source.auth_type") + disc_kafka_azure_blob_storage_source.set_prop("inputFormat", AAZStrType, ".kafka_azure_blob_storage_source.input_format") + disc_kafka_azure_blob_storage_source.set_prop("maxTasks", AAZStrType, ".kafka_azure_blob_storage_source.max_tasks") + disc_kafka_azure_blob_storage_source.set_prop("outputFormat", AAZStrType, ".kafka_azure_blob_storage_source.output_format") + disc_kafka_azure_blob_storage_source.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_blob_storage_source.service_account_id") + disc_kafka_azure_blob_storage_source.set_prop("topicRegex", AAZStrType, ".kafka_azure_blob_storage_source.topic_regex") + disc_kafka_azure_blob_storage_source.set_prop("topicsDir", AAZStrType, ".kafka_azure_blob_storage_source.topics_dir") + + disc_kafka_azure_cosmos_db_sink = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureCosmosDBSink}") + if disc_kafka_azure_cosmos_db_sink is not None: + disc_kafka_azure_cosmos_db_sink.set_prop("apiKey", AAZStrType, ".kafka_azure_cosmos_db_sink.api_key") + 
disc_kafka_azure_cosmos_db_sink.set_prop("apiSecret", AAZStrType, ".kafka_azure_cosmos_db_sink.api_secret") + disc_kafka_azure_cosmos_db_sink.set_prop("authType", AAZStrType, ".kafka_azure_cosmos_db_sink.auth_type") + disc_kafka_azure_cosmos_db_sink.set_prop("flushSize", AAZStrType, ".kafka_azure_cosmos_db_sink.flush_size") + disc_kafka_azure_cosmos_db_sink.set_prop("inputFormat", AAZStrType, ".kafka_azure_cosmos_db_sink.input_format") + disc_kafka_azure_cosmos_db_sink.set_prop("maxTasks", AAZStrType, ".kafka_azure_cosmos_db_sink.max_tasks") + disc_kafka_azure_cosmos_db_sink.set_prop("outputFormat", AAZStrType, ".kafka_azure_cosmos_db_sink.output_format") + disc_kafka_azure_cosmos_db_sink.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_cosmos_db_sink.service_account_id") + disc_kafka_azure_cosmos_db_sink.set_prop("timeInterval", AAZStrType, ".kafka_azure_cosmos_db_sink.time_interval") + disc_kafka_azure_cosmos_db_sink.set_prop("topics", AAZListType, ".kafka_azure_cosmos_db_sink.topics") + disc_kafka_azure_cosmos_db_sink.set_prop("topicsDir", AAZStrType, ".kafka_azure_cosmos_db_sink.topics_dir") + + topics = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureCosmosDBSink}.topics") + if topics is not None: + topics.set_elements(AAZStrType, ".") + + disc_kafka_azure_cosmos_db_source = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureCosmosDBSource}") + if disc_kafka_azure_cosmos_db_source is not None: + disc_kafka_azure_cosmos_db_source.set_prop("apiKey", AAZStrType, ".kafka_azure_cosmos_db_source.api_key") + disc_kafka_azure_cosmos_db_source.set_prop("apiSecret", AAZStrType, ".kafka_azure_cosmos_db_source.api_secret", typ_kwargs={"flags": {"secret": True}}) + disc_kafka_azure_cosmos_db_source.set_prop("authType", AAZStrType, ".kafka_azure_cosmos_db_source.auth_type") + disc_kafka_azure_cosmos_db_source.set_prop("inputFormat", AAZStrType, ".kafka_azure_cosmos_db_source.input_format") + 
disc_kafka_azure_cosmos_db_source.set_prop("maxTasks", AAZStrType, ".kafka_azure_cosmos_db_source.max_tasks") + disc_kafka_azure_cosmos_db_source.set_prop("outputFormat", AAZStrType, ".kafka_azure_cosmos_db_source.output_format") + disc_kafka_azure_cosmos_db_source.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_cosmos_db_source.service_account_id") + disc_kafka_azure_cosmos_db_source.set_prop("topicRegex", AAZStrType, ".kafka_azure_cosmos_db_source.topic_regex") + disc_kafka_azure_cosmos_db_source.set_prop("topicsDir", AAZStrType, ".kafka_azure_cosmos_db_source.topics_dir") + + disc_kafka_azure_synapse_analytics_sink = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureSynapseAnalyticsSink}") + if disc_kafka_azure_synapse_analytics_sink is not None: + disc_kafka_azure_synapse_analytics_sink.set_prop("apiKey", AAZStrType, ".kafka_azure_synapse_analytics_sink.api_key") + disc_kafka_azure_synapse_analytics_sink.set_prop("apiSecret", AAZStrType, ".kafka_azure_synapse_analytics_sink.api_secret") + disc_kafka_azure_synapse_analytics_sink.set_prop("authType", AAZStrType, ".kafka_azure_synapse_analytics_sink.auth_type") + disc_kafka_azure_synapse_analytics_sink.set_prop("flushSize", AAZStrType, ".kafka_azure_synapse_analytics_sink.flush_size") + disc_kafka_azure_synapse_analytics_sink.set_prop("inputFormat", AAZStrType, ".kafka_azure_synapse_analytics_sink.input_format") + disc_kafka_azure_synapse_analytics_sink.set_prop("maxTasks", AAZStrType, ".kafka_azure_synapse_analytics_sink.max_tasks") + disc_kafka_azure_synapse_analytics_sink.set_prop("outputFormat", AAZStrType, ".kafka_azure_synapse_analytics_sink.output_format") + disc_kafka_azure_synapse_analytics_sink.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_synapse_analytics_sink.service_account_id") + disc_kafka_azure_synapse_analytics_sink.set_prop("timeInterval", AAZStrType, ".kafka_azure_synapse_analytics_sink.time_interval") + 
disc_kafka_azure_synapse_analytics_sink.set_prop("topics", AAZListType, ".kafka_azure_synapse_analytics_sink.topics") + disc_kafka_azure_synapse_analytics_sink.set_prop("topicsDir", AAZStrType, ".kafka_azure_synapse_analytics_sink.topics_dir") + + topics = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureSynapseAnalyticsSink}.topics") + if topics is not None: + topics.set_elements(AAZStrType, ".") + + return self.serialize_content(_content_value) + + def on_200_201(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200_201 + ) + + _schema_on_200_201 = None + + @classmethod + def _build_schema_on_200_201(cls): + if cls._schema_on_200_201 is not None: + return cls._schema_on_200_201 + + cls._schema_on_200_201 = AAZObjectType() + + _schema_on_200_201 = cls._schema_on_200_201 + _schema_on_200_201.id = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200_201.name = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200_201.properties = AAZObjectType( + flags={"required": True, "client_flatten": True}, + ) + _schema_on_200_201.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + _schema_on_200_201.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200_201.properties + properties.connector_basic_info = AAZObjectType( + serialized_name="connectorBasicInfo", + ) + properties.connector_service_type_info = AAZObjectType( + serialized_name="connectorServiceTypeInfo", + ) + properties.partner_connector_info = AAZObjectType( + serialized_name="partnerConnectorInfo", + ) + + connector_basic_info = cls._schema_on_200_201.properties.connector_basic_info + connector_basic_info.connector_class = AAZStrType( + serialized_name="connectorClass", + ) + connector_basic_info.connector_id = AAZStrType( + serialized_name="connectorId", + ) + connector_basic_info.connector_name = 
AAZStrType( + serialized_name="connectorName", + ) + connector_basic_info.connector_state = AAZStrType( + serialized_name="connectorState", + ) + connector_basic_info.connector_type = AAZStrType( + serialized_name="connectorType", + ) + + connector_service_type_info = cls._schema_on_200_201.properties.connector_service_type_info + connector_service_type_info.connector_service_type = AAZStrType( + serialized_name="connectorServiceType", + flags={"required": True}, + ) + + disc_azure_blob_storage_sink_connector = cls._schema_on_200_201.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureBlobStorageSinkConnector") + disc_azure_blob_storage_sink_connector.storage_account_key = AAZStrType( + serialized_name="storageAccountKey", + ) + disc_azure_blob_storage_sink_connector.storage_account_name = AAZStrType( + serialized_name="storageAccountName", + ) + disc_azure_blob_storage_sink_connector.storage_container_name = AAZStrType( + serialized_name="storageContainerName", + ) + + disc_azure_blob_storage_source_connector = cls._schema_on_200_201.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureBlobStorageSourceConnector") + disc_azure_blob_storage_source_connector.storage_account_key = AAZStrType( + serialized_name="storageAccountKey", + flags={"secret": True}, + ) + disc_azure_blob_storage_source_connector.storage_account_name = AAZStrType( + serialized_name="storageAccountName", + ) + disc_azure_blob_storage_source_connector.storage_container_name = AAZStrType( + serialized_name="storageContainerName", + ) + + disc_azure_cosmos_db_sink_connector = cls._schema_on_200_201.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureCosmosDBSinkConnector") + disc_azure_cosmos_db_sink_connector.cosmos_connection_endpoint = AAZStrType( + serialized_name="cosmosConnectionEndpoint", + ) + disc_azure_cosmos_db_sink_connector.cosmos_containers_topic_mapping = AAZStrType( + 
serialized_name="cosmosContainersTopicMapping", + ) + disc_azure_cosmos_db_sink_connector.cosmos_database_name = AAZStrType( + serialized_name="cosmosDatabaseName", + ) + disc_azure_cosmos_db_sink_connector.cosmos_id_strategy = AAZStrType( + serialized_name="cosmosIdStrategy", + ) + disc_azure_cosmos_db_sink_connector.cosmos_master_key = AAZStrType( + serialized_name="cosmosMasterKey", + ) + + disc_azure_cosmos_db_source_connector = cls._schema_on_200_201.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureCosmosDBSourceConnector") + disc_azure_cosmos_db_source_connector.cosmos_connection_endpoint = AAZStrType( + serialized_name="cosmosConnectionEndpoint", + ) + disc_azure_cosmos_db_source_connector.cosmos_containers_topic_mapping = AAZStrType( + serialized_name="cosmosContainersTopicMapping", + ) + disc_azure_cosmos_db_source_connector.cosmos_database_name = AAZStrType( + serialized_name="cosmosDatabaseName", + ) + disc_azure_cosmos_db_source_connector.cosmos_master_key = AAZStrType( + serialized_name="cosmosMasterKey", + ) + disc_azure_cosmos_db_source_connector.cosmos_message_key_enabled = AAZBoolType( + serialized_name="cosmosMessageKeyEnabled", + ) + disc_azure_cosmos_db_source_connector.cosmos_message_key_field = AAZStrType( + serialized_name="cosmosMessageKeyField", + ) + + disc_azure_synapse_analytics_sink_connector = cls._schema_on_200_201.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureSynapseAnalyticsSinkConnector") + disc_azure_synapse_analytics_sink_connector.synapse_sql_database_name = AAZStrType( + serialized_name="synapseSqlDatabaseName", + ) + disc_azure_synapse_analytics_sink_connector.synapse_sql_password = AAZStrType( + serialized_name="synapseSqlPassword", + ) + disc_azure_synapse_analytics_sink_connector.synapse_sql_server_name = AAZStrType( + serialized_name="synapseSqlServerName", + ) + disc_azure_synapse_analytics_sink_connector.synapse_sql_user = AAZStrType( + 
serialized_name="synapseSqlUser", + ) + + partner_connector_info = cls._schema_on_200_201.properties.partner_connector_info + partner_connector_info.partner_connector_type = AAZStrType( + serialized_name="partnerConnectorType", + flags={"required": True}, + ) + + disc_kafka_azure_blob_storage_sink = cls._schema_on_200_201.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSink") + disc_kafka_azure_blob_storage_sink.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_blob_storage_sink.api_secret = AAZStrType( + serialized_name="apiSecret", + ) + disc_kafka_azure_blob_storage_sink.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_blob_storage_sink.flush_size = AAZStrType( + serialized_name="flushSize", + ) + disc_kafka_azure_blob_storage_sink.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_blob_storage_sink.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_blob_storage_sink.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_blob_storage_sink.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_blob_storage_sink.time_interval = AAZStrType( + serialized_name="timeInterval", + ) + disc_kafka_azure_blob_storage_sink.topics = AAZListType() + disc_kafka_azure_blob_storage_sink.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + topics = cls._schema_on_200_201.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSink").topics + topics.Element = AAZStrType() + + disc_kafka_azure_blob_storage_source = cls._schema_on_200_201.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSource") + disc_kafka_azure_blob_storage_source.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_blob_storage_source.api_secret = AAZStrType( + 
serialized_name="apiSecret", + flags={"secret": True}, + ) + disc_kafka_azure_blob_storage_source.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_blob_storage_source.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_blob_storage_source.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_blob_storage_source.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_blob_storage_source.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_blob_storage_source.topic_regex = AAZStrType( + serialized_name="topicRegex", + ) + disc_kafka_azure_blob_storage_source.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + disc_kafka_azure_cosmos_db_sink = cls._schema_on_200_201.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSink") + disc_kafka_azure_cosmos_db_sink.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_cosmos_db_sink.api_secret = AAZStrType( + serialized_name="apiSecret", + ) + disc_kafka_azure_cosmos_db_sink.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_cosmos_db_sink.flush_size = AAZStrType( + serialized_name="flushSize", + ) + disc_kafka_azure_cosmos_db_sink.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_cosmos_db_sink.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_cosmos_db_sink.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_cosmos_db_sink.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_cosmos_db_sink.time_interval = AAZStrType( + serialized_name="timeInterval", + ) + disc_kafka_azure_cosmos_db_sink.topics = AAZListType() + disc_kafka_azure_cosmos_db_sink.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + topics = 
cls._schema_on_200_201.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSink").topics + topics.Element = AAZStrType() + + disc_kafka_azure_cosmos_db_source = cls._schema_on_200_201.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSource") + disc_kafka_azure_cosmos_db_source.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_cosmos_db_source.api_secret = AAZStrType( + serialized_name="apiSecret", + flags={"secret": True}, + ) + disc_kafka_azure_cosmos_db_source.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_cosmos_db_source.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_cosmos_db_source.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_cosmos_db_source.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_cosmos_db_source.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_cosmos_db_source.topic_regex = AAZStrType( + serialized_name="topicRegex", + ) + disc_kafka_azure_cosmos_db_source.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + disc_kafka_azure_synapse_analytics_sink = cls._schema_on_200_201.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureSynapseAnalyticsSink") + disc_kafka_azure_synapse_analytics_sink.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_synapse_analytics_sink.api_secret = AAZStrType( + serialized_name="apiSecret", + ) + disc_kafka_azure_synapse_analytics_sink.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_synapse_analytics_sink.flush_size = AAZStrType( + serialized_name="flushSize", + ) + disc_kafka_azure_synapse_analytics_sink.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_synapse_analytics_sink.max_tasks = AAZStrType( + 
serialized_name="maxTasks", + ) + disc_kafka_azure_synapse_analytics_sink.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_synapse_analytics_sink.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_synapse_analytics_sink.time_interval = AAZStrType( + serialized_name="timeInterval", + ) + disc_kafka_azure_synapse_analytics_sink.topics = AAZListType() + disc_kafka_azure_synapse_analytics_sink.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + topics = cls._schema_on_200_201.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureSynapseAnalyticsSink").topics + topics.Element = AAZStrType() + + system_data = cls._schema_on_200_201.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + return cls._schema_on_200_201 + + +class _CreateHelper: + """Helper class for Create""" + + +__all__ = ["Create"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_delete.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_delete.py new file mode 100644 index 00000000000..d087d977ef8 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_delete.py @@ -0,0 +1,190 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster connector delete", + confirmation="Are you sure you want to perform this operation?", +) +class Delete(AAZCommand): + """Delete confluent connector by name + """ + + _aaz_info = { + "version": "2024-07-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/connectors/{}", "2024-07-01"], + ] + } + + AZ_SUPPORT_NO_WAIT = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_lro_poller(self._execute_operations, None) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + id_part="child_name_2", + ) + _args_schema.connector_name = AAZStrArg( + options=["-n", "--name", "--connector-name"], + help="Confluent connector name", + required=True, + id_part="child_name_3", + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + id_part="child_name_1", + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + return cls._args_schema + + def _execute_operations(self): + 
self.pre_operations() + yield self.ConnectorDelete(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + class ConnectorDelete(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [202]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + if session.http_response.status_code in [204]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_204, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + if session.http_response.status_code in [200, 201]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/connectors/{connectorName}", + **self.url_parameters + ) + + @property + def method(self): + return "DELETE" + + @property + def error_format(self): + return "MgmtErrorFormat" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "connectorName", self.ctx.args.connector_name, + required=True, + ), + **self.serialize_url_param( + "environmentId", 
self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-07-01", + required=True, + ), + } + return parameters + + def on_204(self, session): + pass + + def on_200_201(self, session): + pass + + +class _DeleteHelper: + """Helper class for Delete""" + + +__all__ = ["Delete"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_list.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_list.py new file mode 100644 index 00000000000..24cc2b26c0d --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_list.py @@ -0,0 +1,521 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster connector list", +) +class List(AAZCommand): + """List all the connectors in a cluster + """ + + _aaz_info = { + "version": "2024-07-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/connectors", "2024-07-01"], + ] + } + + AZ_SUPPORT_PAGINATION = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_paging(self._execute_operations, self._output) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + _args_schema.page_size = AAZIntArg( + options=["--page-size"], + help="Pagination size", + ) + _args_schema.page_token = AAZStrArg( + options=["--page-token"], + help="An opaque pagination token to fetch the next set of records", + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.ConnectorList(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + 
@register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True) + next_link = self.deserialize_output(self.ctx.vars.instance.next_link) + return result, next_link + + class ConnectorList(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/connectors", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "pageSize", self.ctx.args.page_size, + ), + **self.serialize_query_param( + "pageToken", self.ctx.args.page_token, + ), + **self.serialize_query_param( + "api-version", "2024-07-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + 
parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.next_link = AAZStrType( + serialized_name="nextLink", + ) + _schema_on_200.value = AAZListType() + + value = cls._schema_on_200.value + value.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.properties = AAZObjectType( + flags={"required": True, "client_flatten": True}, + ) + _element.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + _element.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200.value.Element.properties + properties.connector_basic_info = AAZObjectType( + serialized_name="connectorBasicInfo", + ) + properties.connector_service_type_info = AAZObjectType( + serialized_name="connectorServiceTypeInfo", + ) + properties.partner_connector_info = AAZObjectType( + serialized_name="partnerConnectorInfo", + ) + + connector_basic_info = cls._schema_on_200.value.Element.properties.connector_basic_info + connector_basic_info.connector_class = AAZStrType( + serialized_name="connectorClass", + ) + connector_basic_info.connector_id = AAZStrType( + serialized_name="connectorId", + ) + connector_basic_info.connector_name = AAZStrType( + serialized_name="connectorName", + ) + connector_basic_info.connector_state = AAZStrType( + serialized_name="connectorState", + ) + connector_basic_info.connector_type = AAZStrType( + 
serialized_name="connectorType", + ) + + connector_service_type_info = cls._schema_on_200.value.Element.properties.connector_service_type_info + connector_service_type_info.connector_service_type = AAZStrType( + serialized_name="connectorServiceType", + flags={"required": True}, + ) + + disc_azure_blob_storage_sink_connector = cls._schema_on_200.value.Element.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureBlobStorageSinkConnector") + disc_azure_blob_storage_sink_connector.storage_account_key = AAZStrType( + serialized_name="storageAccountKey", + ) + disc_azure_blob_storage_sink_connector.storage_account_name = AAZStrType( + serialized_name="storageAccountName", + ) + disc_azure_blob_storage_sink_connector.storage_container_name = AAZStrType( + serialized_name="storageContainerName", + ) + + disc_azure_blob_storage_source_connector = cls._schema_on_200.value.Element.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureBlobStorageSourceConnector") + disc_azure_blob_storage_source_connector.storage_account_key = AAZStrType( + serialized_name="storageAccountKey", + flags={"secret": True}, + ) + disc_azure_blob_storage_source_connector.storage_account_name = AAZStrType( + serialized_name="storageAccountName", + ) + disc_azure_blob_storage_source_connector.storage_container_name = AAZStrType( + serialized_name="storageContainerName", + ) + + disc_azure_cosmos_db_sink_connector = cls._schema_on_200.value.Element.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureCosmosDBSinkConnector") + disc_azure_cosmos_db_sink_connector.cosmos_connection_endpoint = AAZStrType( + serialized_name="cosmosConnectionEndpoint", + ) + disc_azure_cosmos_db_sink_connector.cosmos_containers_topic_mapping = AAZStrType( + serialized_name="cosmosContainersTopicMapping", + ) + disc_azure_cosmos_db_sink_connector.cosmos_database_name = AAZStrType( + serialized_name="cosmosDatabaseName", + ) + 
disc_azure_cosmos_db_sink_connector.cosmos_id_strategy = AAZStrType( + serialized_name="cosmosIdStrategy", + ) + disc_azure_cosmos_db_sink_connector.cosmos_master_key = AAZStrType( + serialized_name="cosmosMasterKey", + ) + + disc_azure_cosmos_db_source_connector = cls._schema_on_200.value.Element.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureCosmosDBSourceConnector") + disc_azure_cosmos_db_source_connector.cosmos_connection_endpoint = AAZStrType( + serialized_name="cosmosConnectionEndpoint", + ) + disc_azure_cosmos_db_source_connector.cosmos_containers_topic_mapping = AAZStrType( + serialized_name="cosmosContainersTopicMapping", + ) + disc_azure_cosmos_db_source_connector.cosmos_database_name = AAZStrType( + serialized_name="cosmosDatabaseName", + ) + disc_azure_cosmos_db_source_connector.cosmos_master_key = AAZStrType( + serialized_name="cosmosMasterKey", + ) + disc_azure_cosmos_db_source_connector.cosmos_message_key_enabled = AAZBoolType( + serialized_name="cosmosMessageKeyEnabled", + ) + disc_azure_cosmos_db_source_connector.cosmos_message_key_field = AAZStrType( + serialized_name="cosmosMessageKeyField", + ) + + disc_azure_synapse_analytics_sink_connector = cls._schema_on_200.value.Element.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureSynapseAnalyticsSinkConnector") + disc_azure_synapse_analytics_sink_connector.synapse_sql_database_name = AAZStrType( + serialized_name="synapseSqlDatabaseName", + ) + disc_azure_synapse_analytics_sink_connector.synapse_sql_password = AAZStrType( + serialized_name="synapseSqlPassword", + ) + disc_azure_synapse_analytics_sink_connector.synapse_sql_server_name = AAZStrType( + serialized_name="synapseSqlServerName", + ) + disc_azure_synapse_analytics_sink_connector.synapse_sql_user = AAZStrType( + serialized_name="synapseSqlUser", + ) + + partner_connector_info = cls._schema_on_200.value.Element.properties.partner_connector_info + 
partner_connector_info.partner_connector_type = AAZStrType( + serialized_name="partnerConnectorType", + flags={"required": True}, + ) + + disc_kafka_azure_blob_storage_sink = cls._schema_on_200.value.Element.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSink") + disc_kafka_azure_blob_storage_sink.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_blob_storage_sink.api_secret = AAZStrType( + serialized_name="apiSecret", + ) + disc_kafka_azure_blob_storage_sink.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_blob_storage_sink.flush_size = AAZStrType( + serialized_name="flushSize", + ) + disc_kafka_azure_blob_storage_sink.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_blob_storage_sink.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_blob_storage_sink.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_blob_storage_sink.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_blob_storage_sink.time_interval = AAZStrType( + serialized_name="timeInterval", + ) + disc_kafka_azure_blob_storage_sink.topics = AAZListType() + disc_kafka_azure_blob_storage_sink.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + topics = cls._schema_on_200.value.Element.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSink").topics + topics.Element = AAZStrType() + + disc_kafka_azure_blob_storage_source = cls._schema_on_200.value.Element.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSource") + disc_kafka_azure_blob_storage_source.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_blob_storage_source.api_secret = AAZStrType( + serialized_name="apiSecret", + flags={"secret": True}, + ) + 
disc_kafka_azure_blob_storage_source.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_blob_storage_source.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_blob_storage_source.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_blob_storage_source.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_blob_storage_source.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_blob_storage_source.topic_regex = AAZStrType( + serialized_name="topicRegex", + ) + disc_kafka_azure_blob_storage_source.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + disc_kafka_azure_cosmos_db_sink = cls._schema_on_200.value.Element.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSink") + disc_kafka_azure_cosmos_db_sink.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_cosmos_db_sink.api_secret = AAZStrType( + serialized_name="apiSecret", + ) + disc_kafka_azure_cosmos_db_sink.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_cosmos_db_sink.flush_size = AAZStrType( + serialized_name="flushSize", + ) + disc_kafka_azure_cosmos_db_sink.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_cosmos_db_sink.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_cosmos_db_sink.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_cosmos_db_sink.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_cosmos_db_sink.time_interval = AAZStrType( + serialized_name="timeInterval", + ) + disc_kafka_azure_cosmos_db_sink.topics = AAZListType() + disc_kafka_azure_cosmos_db_sink.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + topics = 
cls._schema_on_200.value.Element.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSink").topics + topics.Element = AAZStrType() + + disc_kafka_azure_cosmos_db_source = cls._schema_on_200.value.Element.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSource") + disc_kafka_azure_cosmos_db_source.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_cosmos_db_source.api_secret = AAZStrType( + serialized_name="apiSecret", + flags={"secret": True}, + ) + disc_kafka_azure_cosmos_db_source.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_cosmos_db_source.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_cosmos_db_source.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_cosmos_db_source.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_cosmos_db_source.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_cosmos_db_source.topic_regex = AAZStrType( + serialized_name="topicRegex", + ) + disc_kafka_azure_cosmos_db_source.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + disc_kafka_azure_synapse_analytics_sink = cls._schema_on_200.value.Element.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureSynapseAnalyticsSink") + disc_kafka_azure_synapse_analytics_sink.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_synapse_analytics_sink.api_secret = AAZStrType( + serialized_name="apiSecret", + ) + disc_kafka_azure_synapse_analytics_sink.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_synapse_analytics_sink.flush_size = AAZStrType( + serialized_name="flushSize", + ) + disc_kafka_azure_synapse_analytics_sink.input_format = AAZStrType( + serialized_name="inputFormat", + ) + 
disc_kafka_azure_synapse_analytics_sink.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_synapse_analytics_sink.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_synapse_analytics_sink.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_synapse_analytics_sink.time_interval = AAZStrType( + serialized_name="timeInterval", + ) + disc_kafka_azure_synapse_analytics_sink.topics = AAZListType() + disc_kafka_azure_synapse_analytics_sink.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + topics = cls._schema_on_200.value.Element.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureSynapseAnalyticsSink").topics + topics.Element = AAZStrType() + + system_data = cls._schema_on_200.value.Element.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + return cls._schema_on_200 + + +class _ListHelper: + """Helper class for List""" + + +__all__ = ["List"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_show.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_show.py new file mode 100644 index 00000000000..ebb119c20a7 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_show.py @@ -0,0 +1,509 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster connector show", +) +class Show(AAZCommand): + """Get confluent connector by Name + """ + + _aaz_info = { + "version": "2024-07-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/connectors/{}", "2024-07-01"], + ] + } + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + id_part="child_name_2", + ) + _args_schema.connector_name = AAZStrArg( + options=["-n", "--name", "--connector-name"], + help="Confluent connector name", + required=True, + id_part="child_name_3", + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + id_part="child_name_1", + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.ConnectorGet(ctx=self.ctx)() + 
self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class ConnectorGet(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/connectors/{connectorName}", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "connectorName", self.ctx.args.connector_name, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-07-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + 
"Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.id = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200.name = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200.properties = AAZObjectType( + flags={"required": True, "client_flatten": True}, + ) + _schema_on_200.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + _schema_on_200.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200.properties + properties.connector_basic_info = AAZObjectType( + serialized_name="connectorBasicInfo", + ) + properties.connector_service_type_info = AAZObjectType( + serialized_name="connectorServiceTypeInfo", + ) + properties.partner_connector_info = AAZObjectType( + serialized_name="partnerConnectorInfo", + ) + + connector_basic_info = cls._schema_on_200.properties.connector_basic_info + connector_basic_info.connector_class = AAZStrType( + serialized_name="connectorClass", + ) + connector_basic_info.connector_id = AAZStrType( + serialized_name="connectorId", + ) + connector_basic_info.connector_name = AAZStrType( + serialized_name="connectorName", + ) + connector_basic_info.connector_state = AAZStrType( + serialized_name="connectorState", + ) + connector_basic_info.connector_type = AAZStrType( + serialized_name="connectorType", + ) + + connector_service_type_info = cls._schema_on_200.properties.connector_service_type_info + connector_service_type_info.connector_service_type = AAZStrType( + serialized_name="connectorServiceType", + flags={"required": True}, + ) + + 
disc_azure_blob_storage_sink_connector = cls._schema_on_200.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureBlobStorageSinkConnector") + disc_azure_blob_storage_sink_connector.storage_account_key = AAZStrType( + serialized_name="storageAccountKey", + ) + disc_azure_blob_storage_sink_connector.storage_account_name = AAZStrType( + serialized_name="storageAccountName", + ) + disc_azure_blob_storage_sink_connector.storage_container_name = AAZStrType( + serialized_name="storageContainerName", + ) + + disc_azure_blob_storage_source_connector = cls._schema_on_200.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureBlobStorageSourceConnector") + disc_azure_blob_storage_source_connector.storage_account_key = AAZStrType( + serialized_name="storageAccountKey", + flags={"secret": True}, + ) + disc_azure_blob_storage_source_connector.storage_account_name = AAZStrType( + serialized_name="storageAccountName", + ) + disc_azure_blob_storage_source_connector.storage_container_name = AAZStrType( + serialized_name="storageContainerName", + ) + + disc_azure_cosmos_db_sink_connector = cls._schema_on_200.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureCosmosDBSinkConnector") + disc_azure_cosmos_db_sink_connector.cosmos_connection_endpoint = AAZStrType( + serialized_name="cosmosConnectionEndpoint", + ) + disc_azure_cosmos_db_sink_connector.cosmos_containers_topic_mapping = AAZStrType( + serialized_name="cosmosContainersTopicMapping", + ) + disc_azure_cosmos_db_sink_connector.cosmos_database_name = AAZStrType( + serialized_name="cosmosDatabaseName", + ) + disc_azure_cosmos_db_sink_connector.cosmos_id_strategy = AAZStrType( + serialized_name="cosmosIdStrategy", + ) + disc_azure_cosmos_db_sink_connector.cosmos_master_key = AAZStrType( + serialized_name="cosmosMasterKey", + ) + + disc_azure_cosmos_db_source_connector = 
cls._schema_on_200.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureCosmosDBSourceConnector") + disc_azure_cosmos_db_source_connector.cosmos_connection_endpoint = AAZStrType( + serialized_name="cosmosConnectionEndpoint", + ) + disc_azure_cosmos_db_source_connector.cosmos_containers_topic_mapping = AAZStrType( + serialized_name="cosmosContainersTopicMapping", + ) + disc_azure_cosmos_db_source_connector.cosmos_database_name = AAZStrType( + serialized_name="cosmosDatabaseName", + ) + disc_azure_cosmos_db_source_connector.cosmos_master_key = AAZStrType( + serialized_name="cosmosMasterKey", + ) + disc_azure_cosmos_db_source_connector.cosmos_message_key_enabled = AAZBoolType( + serialized_name="cosmosMessageKeyEnabled", + ) + disc_azure_cosmos_db_source_connector.cosmos_message_key_field = AAZStrType( + serialized_name="cosmosMessageKeyField", + ) + + disc_azure_synapse_analytics_sink_connector = cls._schema_on_200.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureSynapseAnalyticsSinkConnector") + disc_azure_synapse_analytics_sink_connector.synapse_sql_database_name = AAZStrType( + serialized_name="synapseSqlDatabaseName", + ) + disc_azure_synapse_analytics_sink_connector.synapse_sql_password = AAZStrType( + serialized_name="synapseSqlPassword", + ) + disc_azure_synapse_analytics_sink_connector.synapse_sql_server_name = AAZStrType( + serialized_name="synapseSqlServerName", + ) + disc_azure_synapse_analytics_sink_connector.synapse_sql_user = AAZStrType( + serialized_name="synapseSqlUser", + ) + + partner_connector_info = cls._schema_on_200.properties.partner_connector_info + partner_connector_info.partner_connector_type = AAZStrType( + serialized_name="partnerConnectorType", + flags={"required": True}, + ) + + disc_kafka_azure_blob_storage_sink = cls._schema_on_200.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSink") + 
disc_kafka_azure_blob_storage_sink.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_blob_storage_sink.api_secret = AAZStrType( + serialized_name="apiSecret", + ) + disc_kafka_azure_blob_storage_sink.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_blob_storage_sink.flush_size = AAZStrType( + serialized_name="flushSize", + ) + disc_kafka_azure_blob_storage_sink.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_blob_storage_sink.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_blob_storage_sink.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_blob_storage_sink.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_blob_storage_sink.time_interval = AAZStrType( + serialized_name="timeInterval", + ) + disc_kafka_azure_blob_storage_sink.topics = AAZListType() + disc_kafka_azure_blob_storage_sink.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + topics = cls._schema_on_200.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSink").topics + topics.Element = AAZStrType() + + disc_kafka_azure_blob_storage_source = cls._schema_on_200.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSource") + disc_kafka_azure_blob_storage_source.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_blob_storage_source.api_secret = AAZStrType( + serialized_name="apiSecret", + flags={"secret": True}, + ) + disc_kafka_azure_blob_storage_source.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_blob_storage_source.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_blob_storage_source.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_blob_storage_source.output_format = AAZStrType( + 
serialized_name="outputFormat", + ) + disc_kafka_azure_blob_storage_source.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_blob_storage_source.topic_regex = AAZStrType( + serialized_name="topicRegex", + ) + disc_kafka_azure_blob_storage_source.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + disc_kafka_azure_cosmos_db_sink = cls._schema_on_200.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSink") + disc_kafka_azure_cosmos_db_sink.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_cosmos_db_sink.api_secret = AAZStrType( + serialized_name="apiSecret", + ) + disc_kafka_azure_cosmos_db_sink.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_cosmos_db_sink.flush_size = AAZStrType( + serialized_name="flushSize", + ) + disc_kafka_azure_cosmos_db_sink.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_cosmos_db_sink.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_cosmos_db_sink.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_cosmos_db_sink.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_cosmos_db_sink.time_interval = AAZStrType( + serialized_name="timeInterval", + ) + disc_kafka_azure_cosmos_db_sink.topics = AAZListType() + disc_kafka_azure_cosmos_db_sink.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + topics = cls._schema_on_200.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSink").topics + topics.Element = AAZStrType() + + disc_kafka_azure_cosmos_db_source = cls._schema_on_200.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSource") + disc_kafka_azure_cosmos_db_source.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_cosmos_db_source.api_secret 
= AAZStrType( + serialized_name="apiSecret", + flags={"secret": True}, + ) + disc_kafka_azure_cosmos_db_source.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_cosmos_db_source.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_cosmos_db_source.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_cosmos_db_source.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_cosmos_db_source.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_cosmos_db_source.topic_regex = AAZStrType( + serialized_name="topicRegex", + ) + disc_kafka_azure_cosmos_db_source.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + disc_kafka_azure_synapse_analytics_sink = cls._schema_on_200.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureSynapseAnalyticsSink") + disc_kafka_azure_synapse_analytics_sink.api_key = AAZStrType( + serialized_name="apiKey", + ) + disc_kafka_azure_synapse_analytics_sink.api_secret = AAZStrType( + serialized_name="apiSecret", + ) + disc_kafka_azure_synapse_analytics_sink.auth_type = AAZStrType( + serialized_name="authType", + ) + disc_kafka_azure_synapse_analytics_sink.flush_size = AAZStrType( + serialized_name="flushSize", + ) + disc_kafka_azure_synapse_analytics_sink.input_format = AAZStrType( + serialized_name="inputFormat", + ) + disc_kafka_azure_synapse_analytics_sink.max_tasks = AAZStrType( + serialized_name="maxTasks", + ) + disc_kafka_azure_synapse_analytics_sink.output_format = AAZStrType( + serialized_name="outputFormat", + ) + disc_kafka_azure_synapse_analytics_sink.service_account_id = AAZStrType( + serialized_name="serviceAccountId", + ) + disc_kafka_azure_synapse_analytics_sink.time_interval = AAZStrType( + serialized_name="timeInterval", + ) + disc_kafka_azure_synapse_analytics_sink.topics = AAZListType() + 
disc_kafka_azure_synapse_analytics_sink.topics_dir = AAZStrType( + serialized_name="topicsDir", + ) + + topics = cls._schema_on_200.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureSynapseAnalyticsSink").topics + topics.Element = AAZStrType() + + system_data = cls._schema_on_200.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + return cls._schema_on_200 + + +class _ShowHelper: + """Helper class for Show""" + + +__all__ = ["Show"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_update.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_update.py new file mode 100644 index 00000000000..10afff8d282 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/connector/_update.py @@ -0,0 +1,1325 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster connector update", +) +class Update(AAZCommand): + """Update confluent connector by Name + """ + + _aaz_info = { + "version": "2024-07-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/connectors/{}", "2024-07-01"], + ] + } + + AZ_SUPPORT_GENERIC_UPDATE = True + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + id_part="child_name_2", + ) + _args_schema.connector_name = AAZStrArg( + options=["-n", "--name", "--connector-name"], + help="Confluent connector name", + required=True, + id_part="child_name_3", + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + id_part="child_name_1", + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.connector_basic_info = AAZObjectArg( + options=["--connector-basic-info"], + arg_group="Properties", + help="Connector Info 
Base", + nullable=True, + ) + _args_schema.connector_service_type_info = AAZObjectArg( + options=["--connector-service-type-info"], + arg_group="Properties", + help="Connector Service type info base properties.", + nullable=True, + ) + _args_schema.partner_connector_info = AAZObjectArg( + options=["--partner-connector-info"], + arg_group="Properties", + help="The connection information consumed by applications.", + nullable=True, + ) + + connector_basic_info = cls._args_schema.connector_basic_info + connector_basic_info.connector_class = AAZStrArg( + options=["connector-class"], + help="Connector Class", + nullable=True, + enum={"AZUREBLOBSINK": "AZUREBLOBSINK", "AZUREBLOBSOURCE": "AZUREBLOBSOURCE"}, + ) + connector_basic_info.connector_id = AAZStrArg( + options=["connector-id"], + help="Connector Id", + nullable=True, + ) + connector_basic_info.connector_name = AAZStrArg( + options=["connector-name"], + help="Connector Name", + nullable=True, + ) + connector_basic_info.connector_state = AAZStrArg( + options=["connector-state"], + help="Connector Status", + nullable=True, + enum={"FAILED": "FAILED", "PAUSED": "PAUSED", "PROVISIONING": "PROVISIONING", "RUNNING": "RUNNING"}, + ) + connector_basic_info.connector_type = AAZStrArg( + options=["connector-type"], + help="Connector Type", + nullable=True, + enum={"SINK": "SINK", "SOURCE": "SOURCE"}, + ) + + connector_service_type_info = cls._args_schema.connector_service_type_info + connector_service_type_info.azure_blob_storage_sink_connector = AAZObjectArg( + options=["azure-blob-storage-sink-connector"], + ) + connector_service_type_info.azure_blob_storage_source_connector = AAZObjectArg( + options=["azure-blob-storage-source-connector"], + ) + connector_service_type_info.azure_cosmos_db_sink_connector = AAZObjectArg( + options=["azure-cosmos-db-sink-connector"], + ) + connector_service_type_info.azure_cosmos_db_source_connector = AAZObjectArg( + options=["azure-cosmos-db-source-connector"], + ) + 
connector_service_type_info.azure_synapse_analytics_sink_connector = AAZObjectArg( + options=["azure-synapse-analytics-sink-connector"], + ) + + azure_blob_storage_sink_connector = cls._args_schema.connector_service_type_info.azure_blob_storage_sink_connector + azure_blob_storage_sink_connector.storage_account_key = AAZStrArg( + options=["storage-account-key"], + help="Azure Blob Storage Account Key", + nullable=True, + ) + azure_blob_storage_sink_connector.storage_account_name = AAZStrArg( + options=["storage-account-name"], + help="Azure Blob Storage Account Name", + nullable=True, + ) + azure_blob_storage_sink_connector.storage_container_name = AAZStrArg( + options=["storage-container-name"], + help="Azure Blob Storage Account Container Name", + nullable=True, + ) + + azure_blob_storage_source_connector = cls._args_schema.connector_service_type_info.azure_blob_storage_source_connector + azure_blob_storage_source_connector.storage_account_key = AAZStrArg( + options=["storage-account-key"], + help="Azure Blob Storage Account Key", + nullable=True, + ) + azure_blob_storage_source_connector.storage_account_name = AAZStrArg( + options=["storage-account-name"], + help="Azure Blob Storage Account Name", + nullable=True, + ) + azure_blob_storage_source_connector.storage_container_name = AAZStrArg( + options=["storage-container-name"], + help="Azure Blob Storage Account Container Name", + nullable=True, + ) + + azure_cosmos_db_sink_connector = cls._args_schema.connector_service_type_info.azure_cosmos_db_sink_connector + azure_cosmos_db_sink_connector.cosmos_connection_endpoint = AAZStrArg( + options=["cosmos-connection-endpoint"], + help="Azure Cosmos Database Connection Endpoint", + nullable=True, + ) + azure_cosmos_db_sink_connector.cosmos_containers_topic_mapping = AAZStrArg( + options=["cosmos-containers-topic-mapping"], + help="Azure Cosmos Database Containers Topic Mapping", + nullable=True, + ) + azure_cosmos_db_sink_connector.cosmos_database_name = AAZStrArg( + 
options=["cosmos-database-name"], + help="Azure Cosmos Database Name", + nullable=True, + ) + azure_cosmos_db_sink_connector.cosmos_id_strategy = AAZStrArg( + options=["cosmos-id-strategy"], + help="Azure Cosmos Database Id Strategy", + nullable=True, + ) + azure_cosmos_db_sink_connector.cosmos_master_key = AAZStrArg( + options=["cosmos-master-key"], + help="Azure Cosmos Database Master Key", + nullable=True, + ) + + azure_cosmos_db_source_connector = cls._args_schema.connector_service_type_info.azure_cosmos_db_source_connector + azure_cosmos_db_source_connector.cosmos_connection_endpoint = AAZStrArg( + options=["cosmos-connection-endpoint"], + help="Azure Cosmos Database Connection Endpoint", + nullable=True, + ) + azure_cosmos_db_source_connector.cosmos_containers_topic_mapping = AAZStrArg( + options=["cosmos-containers-topic-mapping"], + help="Azure Cosmos Database Containers Topic Mapping", + nullable=True, + ) + azure_cosmos_db_source_connector.cosmos_database_name = AAZStrArg( + options=["cosmos-database-name"], + help="Azure Cosmos Database Name", + nullable=True, + ) + azure_cosmos_db_source_connector.cosmos_master_key = AAZStrArg( + options=["cosmos-master-key"], + help="Azure Cosmos Database Master Key", + nullable=True, + ) + azure_cosmos_db_source_connector.cosmos_message_key_enabled = AAZBoolArg( + options=["cosmos-message-key-enabled"], + help="Azure Cosmos Database Message Key Enabled", + nullable=True, + ) + azure_cosmos_db_source_connector.cosmos_message_key_field = AAZStrArg( + options=["cosmos-message-key-field"], + help="Azure Cosmos Database Message Key Field", + nullable=True, + ) + + azure_synapse_analytics_sink_connector = cls._args_schema.connector_service_type_info.azure_synapse_analytics_sink_connector + azure_synapse_analytics_sink_connector.synapse_sql_database_name = AAZStrArg( + options=["synapse-sql-database-name"], + help="Azure Synapse Dedicated SQL Pool Database Name", + nullable=True, + ) + 
azure_synapse_analytics_sink_connector.synapse_sql_password = AAZStrArg( + options=["synapse-sql-password"], + help="Azure Synapse SQL login details", + nullable=True, + ) + azure_synapse_analytics_sink_connector.synapse_sql_server_name = AAZStrArg( + options=["synapse-sql-server-name"], + help="Azure Synapse Analytics SQL Server Name", + nullable=True, + ) + azure_synapse_analytics_sink_connector.synapse_sql_user = AAZStrArg( + options=["synapse-sql-user"], + help="Azure Synapse SQL login details", + nullable=True, + ) + + partner_connector_info = cls._args_schema.partner_connector_info + partner_connector_info.kafka_azure_blob_storage_sink = AAZObjectArg( + options=["kafka-azure-blob-storage-sink"], + ) + partner_connector_info.kafka_azure_blob_storage_source = AAZObjectArg( + options=["kafka-azure-blob-storage-source"], + ) + partner_connector_info.kafka_azure_cosmos_db_sink = AAZObjectArg( + options=["kafka-azure-cosmos-db-sink"], + ) + partner_connector_info.kafka_azure_cosmos_db_source = AAZObjectArg( + options=["kafka-azure-cosmos-db-source"], + ) + partner_connector_info.kafka_azure_synapse_analytics_sink = AAZObjectArg( + options=["kafka-azure-synapse-analytics-sink"], + ) + + kafka_azure_blob_storage_sink = cls._args_schema.partner_connector_info.kafka_azure_blob_storage_sink + kafka_azure_blob_storage_sink.api_key = AAZStrArg( + options=["api-key"], + help="Kafka API Key", + nullable=True, + ) + kafka_azure_blob_storage_sink.api_secret = AAZStrArg( + options=["api-secret"], + help="Kafka API Key Secret", + nullable=True, + ) + kafka_azure_blob_storage_sink.auth_type = AAZStrArg( + options=["auth-type"], + help="Kafka Auth Type", + nullable=True, + enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"}, + ) + kafka_azure_blob_storage_sink.flush_size = AAZStrArg( + options=["flush-size"], + help="Flush size", + nullable=True, + ) + kafka_azure_blob_storage_sink.input_format = AAZStrArg( + options=["input-format"], + help="Kafka Input 
Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"}, + ) + kafka_azure_blob_storage_sink.max_tasks = AAZStrArg( + options=["max-tasks"], + help="Maximum Tasks", + nullable=True, + ) + kafka_azure_blob_storage_sink.output_format = AAZStrArg( + options=["output-format"], + help="Kafka Output Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"}, + ) + kafka_azure_blob_storage_sink.service_account_id = AAZStrArg( + options=["service-account-id"], + help="Kafka Service Account Id", + nullable=True, + ) + kafka_azure_blob_storage_sink.time_interval = AAZStrArg( + options=["time-interval"], + help="Time Interval", + nullable=True, + ) + kafka_azure_blob_storage_sink.topics = AAZListArg( + options=["topics"], + help="Kafka topics list", + nullable=True, + ) + kafka_azure_blob_storage_sink.topics_dir = AAZStrArg( + options=["topics-dir"], + help="Kafka topics directory", + nullable=True, + ) + + topics = cls._args_schema.partner_connector_info.kafka_azure_blob_storage_sink.topics + topics.Element = AAZStrArg( + nullable=True, + ) + + kafka_azure_blob_storage_source = cls._args_schema.partner_connector_info.kafka_azure_blob_storage_source + kafka_azure_blob_storage_source.api_key = AAZStrArg( + options=["api-key"], + help="Kafka API Key", + nullable=True, + ) + kafka_azure_blob_storage_source.api_secret = AAZStrArg( + options=["api-secret"], + help="Kafka API Secret", + nullable=True, + ) + kafka_azure_blob_storage_source.auth_type = AAZStrArg( + options=["auth-type"], + help="Kafka Auth Type", + nullable=True, + enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"}, + ) + kafka_azure_blob_storage_source.input_format = AAZStrArg( + options=["input-format"], + help="Kafka Input Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": 
"PROTOBUF", "STRING": "STRING"}, + ) + kafka_azure_blob_storage_source.max_tasks = AAZStrArg( + options=["max-tasks"], + help="Maximum Tasks", + nullable=True, + ) + kafka_azure_blob_storage_source.output_format = AAZStrArg( + options=["output-format"], + help="Kafka Output Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"}, + ) + kafka_azure_blob_storage_source.service_account_id = AAZStrArg( + options=["service-account-id"], + help="Kafka Service Account Id", + nullable=True, + ) + kafka_azure_blob_storage_source.topic_regex = AAZStrArg( + options=["topic-regex"], + help="Kafka topics Regex pattern", + nullable=True, + ) + kafka_azure_blob_storage_source.topics_dir = AAZStrArg( + options=["topics-dir"], + help="Kafka topics directory", + nullable=True, + ) + + kafka_azure_cosmos_db_sink = cls._args_schema.partner_connector_info.kafka_azure_cosmos_db_sink + kafka_azure_cosmos_db_sink.api_key = AAZStrArg( + options=["api-key"], + help="Kafka API Key", + nullable=True, + ) + kafka_azure_cosmos_db_sink.api_secret = AAZStrArg( + options=["api-secret"], + help="Kafka API Key Secret", + nullable=True, + ) + kafka_azure_cosmos_db_sink.auth_type = AAZStrArg( + options=["auth-type"], + help="Kafka Auth Type", + nullable=True, + enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"}, + ) + kafka_azure_cosmos_db_sink.flush_size = AAZStrArg( + options=["flush-size"], + help="Flush size", + nullable=True, + ) + kafka_azure_cosmos_db_sink.input_format = AAZStrArg( + options=["input-format"], + help="Kafka Input Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"}, + ) + kafka_azure_cosmos_db_sink.max_tasks = AAZStrArg( + options=["max-tasks"], + help="Maximum Tasks", + nullable=True, + ) + kafka_azure_cosmos_db_sink.output_format = AAZStrArg( + options=["output-format"], + help="Kafka 
Output Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"}, + ) + kafka_azure_cosmos_db_sink.service_account_id = AAZStrArg( + options=["service-account-id"], + help="Kafka Service Account Id", + nullable=True, + ) + kafka_azure_cosmos_db_sink.time_interval = AAZStrArg( + options=["time-interval"], + help="Time Interval", + nullable=True, + ) + kafka_azure_cosmos_db_sink.topics = AAZListArg( + options=["topics"], + help="Kafka topics list", + nullable=True, + ) + kafka_azure_cosmos_db_sink.topics_dir = AAZStrArg( + options=["topics-dir"], + help="Kafka topics directory", + nullable=True, + ) + + topics = cls._args_schema.partner_connector_info.kafka_azure_cosmos_db_sink.topics + topics.Element = AAZStrArg( + nullable=True, + ) + + kafka_azure_cosmos_db_source = cls._args_schema.partner_connector_info.kafka_azure_cosmos_db_source + kafka_azure_cosmos_db_source.api_key = AAZStrArg( + options=["api-key"], + help="Kafka API Key", + nullable=True, + ) + kafka_azure_cosmos_db_source.api_secret = AAZStrArg( + options=["api-secret"], + help="Kafka API Secret", + nullable=True, + ) + kafka_azure_cosmos_db_source.auth_type = AAZStrArg( + options=["auth-type"], + help="Kafka Auth Type", + nullable=True, + enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"}, + ) + kafka_azure_cosmos_db_source.input_format = AAZStrArg( + options=["input-format"], + help="Kafka Input Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"}, + ) + kafka_azure_cosmos_db_source.max_tasks = AAZStrArg( + options=["max-tasks"], + help="Maximum Tasks", + nullable=True, + ) + kafka_azure_cosmos_db_source.output_format = AAZStrArg( + options=["output-format"], + help="Kafka Output Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": 
"STRING"}, + ) + kafka_azure_cosmos_db_source.service_account_id = AAZStrArg( + options=["service-account-id"], + help="Kafka Service Account Id", + nullable=True, + ) + kafka_azure_cosmos_db_source.topic_regex = AAZStrArg( + options=["topic-regex"], + help="Kafka topics Regex pattern", + nullable=True, + ) + kafka_azure_cosmos_db_source.topics_dir = AAZStrArg( + options=["topics-dir"], + help="Kafka topics directory", + nullable=True, + ) + + kafka_azure_synapse_analytics_sink = cls._args_schema.partner_connector_info.kafka_azure_synapse_analytics_sink + kafka_azure_synapse_analytics_sink.api_key = AAZStrArg( + options=["api-key"], + help="Kafka API Key", + nullable=True, + ) + kafka_azure_synapse_analytics_sink.api_secret = AAZStrArg( + options=["api-secret"], + help="Kafka API Key Secret", + nullable=True, + ) + kafka_azure_synapse_analytics_sink.auth_type = AAZStrArg( + options=["auth-type"], + help="Kafka Auth Type", + nullable=True, + enum={"KAFKA_API_KEY": "KAFKA_API_KEY", "SERVICE_ACCOUNT": "SERVICE_ACCOUNT"}, + ) + kafka_azure_synapse_analytics_sink.flush_size = AAZStrArg( + options=["flush-size"], + help="Flush size", + nullable=True, + ) + kafka_azure_synapse_analytics_sink.input_format = AAZStrArg( + options=["input-format"], + help="Kafka Input Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"}, + ) + kafka_azure_synapse_analytics_sink.max_tasks = AAZStrArg( + options=["max-tasks"], + help="Maximum Tasks", + nullable=True, + ) + kafka_azure_synapse_analytics_sink.output_format = AAZStrArg( + options=["output-format"], + help="Kafka Output Data Format Type", + nullable=True, + enum={"AVRO": "AVRO", "BYTES": "BYTES", "JSON": "JSON", "PROTOBUF": "PROTOBUF", "STRING": "STRING"}, + ) + kafka_azure_synapse_analytics_sink.service_account_id = AAZStrArg( + options=["service-account-id"], + help="Kafka Service Account Id", + nullable=True, + ) + 
        # --- tail of _build_arguments_schema: remaining args for the
        # KafkaAzureSynapseAnalyticsSink partner-connector variant ---
        kafka_azure_synapse_analytics_sink.time_interval = AAZStrArg(
            options=["time-interval"],
            help="Time Interval",
            nullable=True,
        )
        kafka_azure_synapse_analytics_sink.topics = AAZListArg(
            options=["topics"],
            help="Kafka topics list",
            nullable=True,
        )
        kafka_azure_synapse_analytics_sink.topics_dir = AAZStrArg(
            options=["topics-dir"],
            help="Kafka topics directory",
            nullable=True,
        )

        # Element schema for the synapse-sink topics list (list of strings).
        topics = cls._args_schema.partner_connector_info.kafka_azure_synapse_analytics_sink.topics
        topics.Element = AAZStrArg(
            nullable=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        """Run the generic-update flow: GET the connector, patch the fetched
        instance from CLI args (JSON then generic --set/--add/--remove),
        then PUT the merged instance back."""
        self.pre_operations()
        self.ConnectorGet(ctx=self.ctx)()
        self.pre_instance_update(self.ctx.vars.instance)
        self.InstanceUpdateByJson(ctx=self.ctx)()
        self.InstanceUpdateByGeneric(ctx=self.ctx)()
        self.post_instance_update(self.ctx.vars.instance)
        self.ConnectorCreateOrUpdate(ctx=self.ctx)()
        self.post_operations()

    # Extension hooks: no-ops by default, overridable via register_callback
    # for manual customization without editing generated code.
    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    @register_callback
    def pre_instance_update(self, instance):
        pass

    @register_callback
    def post_instance_update(self, instance):
        pass

    def _output(self, *args, **kwargs):
        """Serialize the final (post-PUT) instance as the command output."""
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class ConnectorGet(AAZHttpOperation):
        """GET the existing connector resource and store it in ctx.vars.instance."""
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # Only 200 is a success for GET; anything else is routed to on_error.
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/connectors/{connectorName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            # All six path segments are required by the ARM resource id.
            parameters = {
                **self.serialize_url_param(
                    "clusterId", self.ctx.args.cluster_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "connectorName", self.ctx.args.connector_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "environmentId", self.ctx.args.environment_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "organizationName", self.ctx.args.organization_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            # NOTE(review): api-version is pinned to 2024-07-01 here while the
            # patch title mentions 2025-08-18-preview changes — confirm the
            # connector operations intentionally stay on the stable version.
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2024-07-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Deserialize the GET body into ctx.vars.instance so the update
            # operations can patch it in place.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Lazily build (and cache) the shared ConnectorResource read schema.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()
            _UpdateHelper._build_schema_connector_resource_read(cls._schema_on_200)

            return cls._schema_on_200

    class ConnectorCreateOrUpdate(AAZHttpOperation):
        """PUT the patched instance back to the service (create-or-update)."""
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # PUT may return 200 (updated) or 201 (created).
            if session.http_response.status_code in [200, 201]:
                return self.on_200_201(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/connectors/{connectorName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "PUT"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            # Same resource id as ConnectorGet; kept in sync by the generator.
            parameters = {
                **self.serialize_url_param(
                    "clusterId", self.ctx.args.cluster_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "connectorName", self.ctx.args.connector_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "environmentId", self.ctx.args.environment_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "organizationName", self.ctx.args.organization_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            # NOTE(review): same 2024-07-01 pin as ConnectorGet — see note there.
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2024-07-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        @property
        def content(self):
            # Request body is the fully patched instance from ctx.vars.instance.
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                value=self.ctx.vars.instance,
            )

            return self.serialize_content(_content_value)

        def on_200_201(self, session):
            # Store the service's response as the final instance for _output.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200_201
            )

        _schema_on_200_201 = None

        @classmethod
        def _build_schema_on_200_201(cls):
            # Lazily build (and cache) the shared ConnectorResource read schema.
            if cls._schema_on_200_201 is not None:
                return cls._schema_on_200_201

            cls._schema_on_200_201 = AAZObjectType()
            _UpdateHelper._build_schema_connector_resource_read(cls._schema_on_200_201)

            return cls._schema_on_200_201

    class InstanceUpdateByJson(AAZJsonInstanceUpdateOperation):
        """Map the typed CLI arguments onto the fetched instance JSON."""

        def __call__(self, *args, **kwargs):
            self._update_instance(self.ctx.vars.instance)

        def _update_instance(self, instance):
            _instance_value, _builder = self.new_content_builder(
                self.ctx.args,
                value=instance,
                typ=AAZObjectType
            )
            _builder.set_prop("properties", AAZObjectType, ".", typ_kwargs={"flags": {"required": True, "client_flatten": True}})

            properties = _builder.get(".properties")
            if properties is not None:
                properties.set_prop("connectorBasicInfo", AAZObjectType, ".connector_basic_info")
                properties.set_prop("connectorServiceTypeInfo", AAZObjectType, ".connector_service_type_info")
                properties.set_prop("partnerConnectorInfo", AAZObjectType, ".partner_connector_info")

            connector_basic_info = _builder.get(".properties.connectorBasicInfo")
            if connector_basic_info is not None:
                connector_basic_info.set_prop("connectorClass", AAZStrType, ".connector_class")
                connector_basic_info.set_prop("connectorId", AAZStrType, ".connector_id")
                connector_basic_info.set_prop("connectorName", AAZStrType, ".connector_name")
                connector_basic_info.set_prop("connectorState", AAZStrType, ".connector_state")
                connector_basic_info.set_prop("connectorType", AAZStrType, ".connector_type")

            # connectorServiceTypeInfo is a discriminated union: each variant
            # arg pins the "connectorServiceType" constant, then the matching
            # discriminated sub-schema is selected below.
            connector_service_type_info = _builder.get(".properties.connectorServiceTypeInfo")
            if connector_service_type_info is not None:
                connector_service_type_info.set_const("connectorServiceType", "AzureBlobStorageSinkConnector", AAZStrType, ".azure_blob_storage_sink_connector", typ_kwargs={"flags": {"required": True}})
                connector_service_type_info.set_const("connectorServiceType", "AzureBlobStorageSourceConnector", AAZStrType, ".azure_blob_storage_source_connector", typ_kwargs={"flags": {"required": True}})
                connector_service_type_info.set_const("connectorServiceType", "AzureCosmosDBSinkConnector", AAZStrType, ".azure_cosmos_db_sink_connector", typ_kwargs={"flags": {"required": True}})
                connector_service_type_info.set_const("connectorServiceType", "AzureCosmosDBSourceConnector", AAZStrType, ".azure_cosmos_db_source_connector", typ_kwargs={"flags": {"required": True}})
                connector_service_type_info.set_const("connectorServiceType", "AzureSynapseAnalyticsSinkConnector", AAZStrType, ".azure_synapse_analytics_sink_connector", typ_kwargs={"flags": {"required": True}})
                connector_service_type_info.discriminate_by("connectorServiceType", "AzureBlobStorageSinkConnector")
                connector_service_type_info.discriminate_by("connectorServiceType", "AzureBlobStorageSourceConnector")
                connector_service_type_info.discriminate_by("connectorServiceType", "AzureCosmosDBSinkConnector")
                connector_service_type_info.discriminate_by("connectorServiceType", "AzureCosmosDBSourceConnector")
                connector_service_type_info.discriminate_by("connectorServiceType", "AzureSynapseAnalyticsSinkConnector")

            disc_azure_blob_storage_sink_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureBlobStorageSinkConnector}")
            if disc_azure_blob_storage_sink_connector is not None:
                # NOTE(review): storageAccountKey carries no secret flag here
                # while the source-connector variant below marks it secret —
                # presumably mirrors the swagger; confirm against the API spec.
                disc_azure_blob_storage_sink_connector.set_prop("storageAccountKey", AAZStrType, ".azure_blob_storage_sink_connector.storage_account_key")
                disc_azure_blob_storage_sink_connector.set_prop("storageAccountName", AAZStrType, ".azure_blob_storage_sink_connector.storage_account_name")
                disc_azure_blob_storage_sink_connector.set_prop("storageContainerName", AAZStrType, ".azure_blob_storage_sink_connector.storage_container_name")

            disc_azure_blob_storage_source_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureBlobStorageSourceConnector}")
            if disc_azure_blob_storage_source_connector is not None:
                disc_azure_blob_storage_source_connector.set_prop("storageAccountKey", AAZStrType, ".azure_blob_storage_source_connector.storage_account_key", typ_kwargs={"flags": {"secret": True}})
                disc_azure_blob_storage_source_connector.set_prop("storageAccountName", AAZStrType, ".azure_blob_storage_source_connector.storage_account_name")
                disc_azure_blob_storage_source_connector.set_prop("storageContainerName", AAZStrType, ".azure_blob_storage_source_connector.storage_container_name")

            disc_azure_cosmos_db_sink_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureCosmosDBSinkConnector}")
            if disc_azure_cosmos_db_sink_connector is not None:
                disc_azure_cosmos_db_sink_connector.set_prop("cosmosConnectionEndpoint", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_connection_endpoint")
                disc_azure_cosmos_db_sink_connector.set_prop("cosmosContainersTopicMapping", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_containers_topic_mapping")
                disc_azure_cosmos_db_sink_connector.set_prop("cosmosDatabaseName", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_database_name")
                disc_azure_cosmos_db_sink_connector.set_prop("cosmosIdStrategy", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_id_strategy")
                disc_azure_cosmos_db_sink_connector.set_prop("cosmosMasterKey", AAZStrType, ".azure_cosmos_db_sink_connector.cosmos_master_key")

            disc_azure_cosmos_db_source_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureCosmosDBSourceConnector}")
            if disc_azure_cosmos_db_source_connector is not None:
                disc_azure_cosmos_db_source_connector.set_prop("cosmosConnectionEndpoint", AAZStrType, ".azure_cosmos_db_source_connector.cosmos_connection_endpoint")
                disc_azure_cosmos_db_source_connector.set_prop("cosmosContainersTopicMapping", AAZStrType, ".azure_cosmos_db_source_connector.cosmos_containers_topic_mapping")
                disc_azure_cosmos_db_source_connector.set_prop("cosmosDatabaseName", AAZStrType, ".azure_cosmos_db_source_connector.cosmos_database_name")
                disc_azure_cosmos_db_source_connector.set_prop("cosmosMasterKey", AAZStrType, ".azure_cosmos_db_source_connector.cosmos_master_key")
                disc_azure_cosmos_db_source_connector.set_prop("cosmosMessageKeyEnabled", AAZBoolType, ".azure_cosmos_db_source_connector.cosmos_message_key_enabled")
                disc_azure_cosmos_db_source_connector.set_prop("cosmosMessageKeyField", AAZStrType, ".azure_cosmos_db_source_connector.cosmos_message_key_field")

            disc_azure_synapse_analytics_sink_connector = _builder.get(".properties.connectorServiceTypeInfo{connectorServiceType:AzureSynapseAnalyticsSinkConnector}")
            if disc_azure_synapse_analytics_sink_connector is not None:
                disc_azure_synapse_analytics_sink_connector.set_prop("synapseSqlDatabaseName", AAZStrType, ".azure_synapse_analytics_sink_connector.synapse_sql_database_name")
                disc_azure_synapse_analytics_sink_connector.set_prop("synapseSqlPassword", AAZStrType, ".azure_synapse_analytics_sink_connector.synapse_sql_password")
                disc_azure_synapse_analytics_sink_connector.set_prop("synapseSqlServerName", AAZStrType, ".azure_synapse_analytics_sink_connector.synapse_sql_server_name")
                disc_azure_synapse_analytics_sink_connector.set_prop("synapseSqlUser", AAZStrType, ".azure_synapse_analytics_sink_connector.synapse_sql_user")

            # partnerConnectorInfo mirrors the same discriminated-union pattern,
            # keyed on "partnerConnectorType".
            partner_connector_info = _builder.get(".properties.partnerConnectorInfo")
            if partner_connector_info is not None:
                partner_connector_info.set_const("partnerConnectorType", "KafkaAzureBlobStorageSink", AAZStrType, ".kafka_azure_blob_storage_sink", typ_kwargs={"flags": {"required": True}})
                partner_connector_info.set_const("partnerConnectorType", "KafkaAzureBlobStorageSource", AAZStrType, ".kafka_azure_blob_storage_source", typ_kwargs={"flags": {"required": True}})
                partner_connector_info.set_const("partnerConnectorType", "KafkaAzureCosmosDBSink", AAZStrType, ".kafka_azure_cosmos_db_sink", typ_kwargs={"flags": {"required": True}})
                partner_connector_info.set_const("partnerConnectorType", "KafkaAzureCosmosDBSource", AAZStrType, ".kafka_azure_cosmos_db_source", typ_kwargs={"flags": {"required": True}})
                partner_connector_info.set_const("partnerConnectorType", "KafkaAzureSynapseAnalyticsSink", AAZStrType, ".kafka_azure_synapse_analytics_sink", typ_kwargs={"flags": {"required": True}})
                partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureBlobStorageSink")
                partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureBlobStorageSource")
                partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureCosmosDBSink")
                partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureCosmosDBSource")
                partner_connector_info.discriminate_by("partnerConnectorType", "KafkaAzureSynapseAnalyticsSink")

            disc_kafka_azure_blob_storage_sink = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureBlobStorageSink}")
            if disc_kafka_azure_blob_storage_sink is not None:
                disc_kafka_azure_blob_storage_sink.set_prop("apiKey", AAZStrType, ".kafka_azure_blob_storage_sink.api_key")
                disc_kafka_azure_blob_storage_sink.set_prop("apiSecret", AAZStrType, ".kafka_azure_blob_storage_sink.api_secret")
                disc_kafka_azure_blob_storage_sink.set_prop("authType", AAZStrType, ".kafka_azure_blob_storage_sink.auth_type")
                disc_kafka_azure_blob_storage_sink.set_prop("flushSize", AAZStrType, ".kafka_azure_blob_storage_sink.flush_size")
                disc_kafka_azure_blob_storage_sink.set_prop("inputFormat", AAZStrType, ".kafka_azure_blob_storage_sink.input_format")
                disc_kafka_azure_blob_storage_sink.set_prop("maxTasks", AAZStrType, ".kafka_azure_blob_storage_sink.max_tasks")
                disc_kafka_azure_blob_storage_sink.set_prop("outputFormat", AAZStrType, ".kafka_azure_blob_storage_sink.output_format")
                disc_kafka_azure_blob_storage_sink.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_blob_storage_sink.service_account_id")
                disc_kafka_azure_blob_storage_sink.set_prop("timeInterval", AAZStrType, ".kafka_azure_blob_storage_sink.time_interval")
                disc_kafka_azure_blob_storage_sink.set_prop("topics", AAZListType, ".kafka_azure_blob_storage_sink.topics")
                disc_kafka_azure_blob_storage_sink.set_prop("topicsDir", AAZStrType, ".kafka_azure_blob_storage_sink.topics_dir")

            topics = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureBlobStorageSink}.topics")
            if topics is not None:
                topics.set_elements(AAZStrType, ".")

            disc_kafka_azure_blob_storage_source = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureBlobStorageSource}")
            if disc_kafka_azure_blob_storage_source is not None:
                disc_kafka_azure_blob_storage_source.set_prop("apiKey", AAZStrType, ".kafka_azure_blob_storage_source.api_key")
                # Source variants mark apiSecret secret; sink variants do not —
                # see the review note on the blob-sink storageAccountKey above.
                disc_kafka_azure_blob_storage_source.set_prop("apiSecret", AAZStrType, ".kafka_azure_blob_storage_source.api_secret", typ_kwargs={"flags": {"secret": True}})
                disc_kafka_azure_blob_storage_source.set_prop("authType", AAZStrType, ".kafka_azure_blob_storage_source.auth_type")
                disc_kafka_azure_blob_storage_source.set_prop("inputFormat", AAZStrType, ".kafka_azure_blob_storage_source.input_format")
                disc_kafka_azure_blob_storage_source.set_prop("maxTasks", AAZStrType, ".kafka_azure_blob_storage_source.max_tasks")
                disc_kafka_azure_blob_storage_source.set_prop("outputFormat", AAZStrType, ".kafka_azure_blob_storage_source.output_format")
                disc_kafka_azure_blob_storage_source.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_blob_storage_source.service_account_id")
                disc_kafka_azure_blob_storage_source.set_prop("topicRegex", AAZStrType, ".kafka_azure_blob_storage_source.topic_regex")
                disc_kafka_azure_blob_storage_source.set_prop("topicsDir", AAZStrType, ".kafka_azure_blob_storage_source.topics_dir")

            disc_kafka_azure_cosmos_db_sink = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureCosmosDBSink}")
            if disc_kafka_azure_cosmos_db_sink is not None:
                disc_kafka_azure_cosmos_db_sink.set_prop("apiKey", AAZStrType, ".kafka_azure_cosmos_db_sink.api_key")
                disc_kafka_azure_cosmos_db_sink.set_prop("apiSecret", AAZStrType, ".kafka_azure_cosmos_db_sink.api_secret")
                disc_kafka_azure_cosmos_db_sink.set_prop("authType", AAZStrType, ".kafka_azure_cosmos_db_sink.auth_type")
                disc_kafka_azure_cosmos_db_sink.set_prop("flushSize", AAZStrType, ".kafka_azure_cosmos_db_sink.flush_size")
                disc_kafka_azure_cosmos_db_sink.set_prop("inputFormat", AAZStrType, ".kafka_azure_cosmos_db_sink.input_format")
                disc_kafka_azure_cosmos_db_sink.set_prop("maxTasks", AAZStrType, ".kafka_azure_cosmos_db_sink.max_tasks")
                disc_kafka_azure_cosmos_db_sink.set_prop("outputFormat", AAZStrType, ".kafka_azure_cosmos_db_sink.output_format")
                disc_kafka_azure_cosmos_db_sink.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_cosmos_db_sink.service_account_id")
                disc_kafka_azure_cosmos_db_sink.set_prop("timeInterval", AAZStrType, ".kafka_azure_cosmos_db_sink.time_interval")
                disc_kafka_azure_cosmos_db_sink.set_prop("topics", AAZListType, ".kafka_azure_cosmos_db_sink.topics")
                disc_kafka_azure_cosmos_db_sink.set_prop("topicsDir", AAZStrType, ".kafka_azure_cosmos_db_sink.topics_dir")

            topics = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureCosmosDBSink}.topics")
            if topics is not None:
                topics.set_elements(AAZStrType, ".")

            disc_kafka_azure_cosmos_db_source = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureCosmosDBSource}")
            if disc_kafka_azure_cosmos_db_source is not None:
                disc_kafka_azure_cosmos_db_source.set_prop("apiKey", AAZStrType, ".kafka_azure_cosmos_db_source.api_key")
                disc_kafka_azure_cosmos_db_source.set_prop("apiSecret", AAZStrType, ".kafka_azure_cosmos_db_source.api_secret", typ_kwargs={"flags": {"secret": True}})
                disc_kafka_azure_cosmos_db_source.set_prop("authType", AAZStrType, ".kafka_azure_cosmos_db_source.auth_type")
                disc_kafka_azure_cosmos_db_source.set_prop("inputFormat", AAZStrType, ".kafka_azure_cosmos_db_source.input_format")
                disc_kafka_azure_cosmos_db_source.set_prop("maxTasks", AAZStrType, ".kafka_azure_cosmos_db_source.max_tasks")
                disc_kafka_azure_cosmos_db_source.set_prop("outputFormat", AAZStrType, ".kafka_azure_cosmos_db_source.output_format")
                disc_kafka_azure_cosmos_db_source.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_cosmos_db_source.service_account_id")
                disc_kafka_azure_cosmos_db_source.set_prop("topicRegex", AAZStrType, ".kafka_azure_cosmos_db_source.topic_regex")
                disc_kafka_azure_cosmos_db_source.set_prop("topicsDir", AAZStrType, ".kafka_azure_cosmos_db_source.topics_dir")

            disc_kafka_azure_synapse_analytics_sink = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureSynapseAnalyticsSink}")
            if disc_kafka_azure_synapse_analytics_sink is not None:
                disc_kafka_azure_synapse_analytics_sink.set_prop("apiKey", AAZStrType, ".kafka_azure_synapse_analytics_sink.api_key")
                disc_kafka_azure_synapse_analytics_sink.set_prop("apiSecret", AAZStrType, ".kafka_azure_synapse_analytics_sink.api_secret")
                disc_kafka_azure_synapse_analytics_sink.set_prop("authType", AAZStrType, ".kafka_azure_synapse_analytics_sink.auth_type")
                disc_kafka_azure_synapse_analytics_sink.set_prop("flushSize", AAZStrType, ".kafka_azure_synapse_analytics_sink.flush_size")
                disc_kafka_azure_synapse_analytics_sink.set_prop("inputFormat", AAZStrType, ".kafka_azure_synapse_analytics_sink.input_format")
                disc_kafka_azure_synapse_analytics_sink.set_prop("maxTasks", AAZStrType, ".kafka_azure_synapse_analytics_sink.max_tasks")
                disc_kafka_azure_synapse_analytics_sink.set_prop("outputFormat", AAZStrType, ".kafka_azure_synapse_analytics_sink.output_format")
                disc_kafka_azure_synapse_analytics_sink.set_prop("serviceAccountId", AAZStrType, ".kafka_azure_synapse_analytics_sink.service_account_id")
                disc_kafka_azure_synapse_analytics_sink.set_prop("timeInterval", AAZStrType, ".kafka_azure_synapse_analytics_sink.time_interval")
                disc_kafka_azure_synapse_analytics_sink.set_prop("topics", AAZListType, ".kafka_azure_synapse_analytics_sink.topics")
                disc_kafka_azure_synapse_analytics_sink.set_prop("topicsDir", AAZStrType, ".kafka_azure_synapse_analytics_sink.topics_dir")

            # topics is a list of plain strings inside the SynapseAnalyticsSink discriminator payload.
            topics = _builder.get(".properties.partnerConnectorInfo{partnerConnectorType:KafkaAzureSynapseAnalyticsSink}.topics")
            if topics is not None:
                topics.set_elements(AAZStrType, ".")

            return _instance_value

    class InstanceUpdateByGeneric(AAZGenericInstanceUpdateOperation):
        # Applies the generic update arguments (--set/--add/--remove) on top of the
        # instance already stored in ctx.vars.instance.

        def __call__(self, *args, **kwargs):
            self._update_instance_by_generic(
                self.ctx.vars.instance,
                self.ctx.generic_update_args
            )


class _UpdateHelper:
    """Helper class for Update"""

    # Cached response schema for ConnectorResource; built once and shared.
    _schema_connector_resource_read = None

    @classmethod
    def _build_schema_connector_resource_read(cls, _schema):
        # Builds (or reuses) the read schema for a ConnectorResource and copies its
        # top-level fields onto *_schema*. The schema is polymorphic: both
        # connectorServiceTypeInfo and partnerConnectorInfo use discriminators.
        if cls._schema_connector_resource_read is not None:
            _schema.id = cls._schema_connector_resource_read.id
            _schema.name = cls._schema_connector_resource_read.name
            _schema.properties = cls._schema_connector_resource_read.properties
            _schema.system_data = cls._schema_connector_resource_read.system_data
            _schema.type = cls._schema_connector_resource_read.type
            return

        cls._schema_connector_resource_read = _schema_connector_resource_read = AAZObjectType()

        connector_resource_read = _schema_connector_resource_read
        connector_resource_read.id = AAZStrType(
            flags={"read_only": True},
        )
        connector_resource_read.name = AAZStrType(
            flags={"read_only": True},
        )
        connector_resource_read.properties = AAZObjectType(
            flags={"required": True, "client_flatten": True},
        )
        connector_resource_read.system_data = AAZObjectType(
            serialized_name="systemData",
            flags={"read_only": True},
        )
        connector_resource_read.type = AAZStrType(
            flags={"read_only": True},
        )

        properties = _schema_connector_resource_read.properties
        properties.connector_basic_info = AAZObjectType(
            serialized_name="connectorBasicInfo",
        )
        properties.connector_service_type_info = AAZObjectType(
            serialized_name="connectorServiceTypeInfo",
        )
        properties.partner_connector_info = AAZObjectType(
            serialized_name="partnerConnectorInfo",
        )

        connector_basic_info = _schema_connector_resource_read.properties.connector_basic_info
        connector_basic_info.connector_class = AAZStrType(
            serialized_name="connectorClass",
        )
        connector_basic_info.connector_id = AAZStrType(
            serialized_name="connectorId",
        )
        connector_basic_info.connector_name = AAZStrType(
            serialized_name="connectorName",
        )
        connector_basic_info.connector_state = AAZStrType(
            serialized_name="connectorState",
        )
        connector_basic_info.connector_type = AAZStrType(
            serialized_name="connectorType",
        )

        connector_service_type_info = _schema_connector_resource_read.properties.connector_service_type_info
        connector_service_type_info.connector_service_type = AAZStrType(
            serialized_name="connectorServiceType",
            flags={"required": True},
        )

        # connectorServiceTypeInfo discriminated variants (keyed on connector_service_type).
        disc_azure_blob_storage_sink_connector = _schema_connector_resource_read.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureBlobStorageSinkConnector")
        disc_azure_blob_storage_sink_connector.storage_account_key = AAZStrType(
            serialized_name="storageAccountKey",
        )
        disc_azure_blob_storage_sink_connector.storage_account_name = AAZStrType(
            serialized_name="storageAccountName",
        )
        disc_azure_blob_storage_sink_connector.storage_container_name = AAZStrType(
            serialized_name="storageContainerName",
        )

        disc_azure_blob_storage_source_connector = _schema_connector_resource_read.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureBlobStorageSourceConnector")
        disc_azure_blob_storage_source_connector.storage_account_key = AAZStrType(
            serialized_name="storageAccountKey",
            flags={"secret": True},
        )
        disc_azure_blob_storage_source_connector.storage_account_name = AAZStrType(
            serialized_name="storageAccountName",
        )
        disc_azure_blob_storage_source_connector.storage_container_name = AAZStrType(
            serialized_name="storageContainerName",
        )

        disc_azure_cosmos_db_sink_connector = _schema_connector_resource_read.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureCosmosDBSinkConnector")
        disc_azure_cosmos_db_sink_connector.cosmos_connection_endpoint = AAZStrType(
            serialized_name="cosmosConnectionEndpoint",
        )
        disc_azure_cosmos_db_sink_connector.cosmos_containers_topic_mapping = AAZStrType(
            serialized_name="cosmosContainersTopicMapping",
        )
        disc_azure_cosmos_db_sink_connector.cosmos_database_name = AAZStrType(
            serialized_name="cosmosDatabaseName",
        )
        disc_azure_cosmos_db_sink_connector.cosmos_id_strategy = AAZStrType(
            serialized_name="cosmosIdStrategy",
        )
        disc_azure_cosmos_db_sink_connector.cosmos_master_key = AAZStrType(
            serialized_name="cosmosMasterKey",
        )

        disc_azure_cosmos_db_source_connector = _schema_connector_resource_read.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureCosmosDBSourceConnector")
        disc_azure_cosmos_db_source_connector.cosmos_connection_endpoint = AAZStrType(
            serialized_name="cosmosConnectionEndpoint",
        )
        disc_azure_cosmos_db_source_connector.cosmos_containers_topic_mapping = AAZStrType(
            serialized_name="cosmosContainersTopicMapping",
        )
        disc_azure_cosmos_db_source_connector.cosmos_database_name = AAZStrType(
            serialized_name="cosmosDatabaseName",
        )
        disc_azure_cosmos_db_source_connector.cosmos_master_key = AAZStrType(
            serialized_name="cosmosMasterKey",
        )
        disc_azure_cosmos_db_source_connector.cosmos_message_key_enabled = AAZBoolType(
            serialized_name="cosmosMessageKeyEnabled",
        )
        disc_azure_cosmos_db_source_connector.cosmos_message_key_field = AAZStrType(
            serialized_name="cosmosMessageKeyField",
        )

        disc_azure_synapse_analytics_sink_connector = _schema_connector_resource_read.properties.connector_service_type_info.discriminate_by("connector_service_type", "AzureSynapseAnalyticsSinkConnector")
        disc_azure_synapse_analytics_sink_connector.synapse_sql_database_name = AAZStrType(
            serialized_name="synapseSqlDatabaseName",
        )
        disc_azure_synapse_analytics_sink_connector.synapse_sql_password = AAZStrType(
            serialized_name="synapseSqlPassword",
        )
        disc_azure_synapse_analytics_sink_connector.synapse_sql_server_name = AAZStrType(
            serialized_name="synapseSqlServerName",
        )
        disc_azure_synapse_analytics_sink_connector.synapse_sql_user = AAZStrType(
            serialized_name="synapseSqlUser",
        )

        partner_connector_info = _schema_connector_resource_read.properties.partner_connector_info
        partner_connector_info.partner_connector_type = AAZStrType(
            serialized_name="partnerConnectorType",
            flags={"required": True},
        )

        # partnerConnectorInfo discriminated variants (keyed on partner_connector_type).
        disc_kafka_azure_blob_storage_sink = _schema_connector_resource_read.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSink")
        disc_kafka_azure_blob_storage_sink.api_key = AAZStrType(
            serialized_name="apiKey",
        )
        disc_kafka_azure_blob_storage_sink.api_secret = AAZStrType(
            serialized_name="apiSecret",
        )
        disc_kafka_azure_blob_storage_sink.auth_type = AAZStrType(
            serialized_name="authType",
        )
        disc_kafka_azure_blob_storage_sink.flush_size = AAZStrType(
            serialized_name="flushSize",
        )
        disc_kafka_azure_blob_storage_sink.input_format = AAZStrType(
            serialized_name="inputFormat",
        )
        disc_kafka_azure_blob_storage_sink.max_tasks = AAZStrType(
            serialized_name="maxTasks",
        )
        disc_kafka_azure_blob_storage_sink.output_format = AAZStrType(
            serialized_name="outputFormat",
        )
        disc_kafka_azure_blob_storage_sink.service_account_id = AAZStrType(
            serialized_name="serviceAccountId",
        )
        disc_kafka_azure_blob_storage_sink.time_interval = AAZStrType(
            serialized_name="timeInterval",
        )
        disc_kafka_azure_blob_storage_sink.topics = AAZListType()
        disc_kafka_azure_blob_storage_sink.topics_dir = AAZStrType(
            serialized_name="topicsDir",
        )

        topics = _schema_connector_resource_read.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSink").topics
        topics.Element = AAZStrType()

        disc_kafka_azure_blob_storage_source = _schema_connector_resource_read.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureBlobStorageSource")
        disc_kafka_azure_blob_storage_source.api_key = AAZStrType(
            serialized_name="apiKey",
        )
        disc_kafka_azure_blob_storage_source.api_secret = AAZStrType(
            serialized_name="apiSecret",
            flags={"secret": True},
        )
        disc_kafka_azure_blob_storage_source.auth_type = AAZStrType(
            serialized_name="authType",
        )
        disc_kafka_azure_blob_storage_source.input_format = AAZStrType(
            serialized_name="inputFormat",
        )
        disc_kafka_azure_blob_storage_source.max_tasks = AAZStrType(
            serialized_name="maxTasks",
        )
        disc_kafka_azure_blob_storage_source.output_format = AAZStrType(
            serialized_name="outputFormat",
        )
        disc_kafka_azure_blob_storage_source.service_account_id = AAZStrType(
            serialized_name="serviceAccountId",
        )
        disc_kafka_azure_blob_storage_source.topic_regex = AAZStrType(
            serialized_name="topicRegex",
        )
        disc_kafka_azure_blob_storage_source.topics_dir = AAZStrType(
            serialized_name="topicsDir",
        )

        disc_kafka_azure_cosmos_db_sink = _schema_connector_resource_read.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSink")
        disc_kafka_azure_cosmos_db_sink.api_key = AAZStrType(
            serialized_name="apiKey",
        )
        disc_kafka_azure_cosmos_db_sink.api_secret = AAZStrType(
            serialized_name="apiSecret",
        )
        disc_kafka_azure_cosmos_db_sink.auth_type = AAZStrType(
            serialized_name="authType",
        )
        disc_kafka_azure_cosmos_db_sink.flush_size = AAZStrType(
            serialized_name="flushSize",
        )
        disc_kafka_azure_cosmos_db_sink.input_format = AAZStrType(
            serialized_name="inputFormat",
        )
        disc_kafka_azure_cosmos_db_sink.max_tasks = AAZStrType(
            serialized_name="maxTasks",
        )
        disc_kafka_azure_cosmos_db_sink.output_format = AAZStrType(
            serialized_name="outputFormat",
        )
        disc_kafka_azure_cosmos_db_sink.service_account_id = AAZStrType(
            serialized_name="serviceAccountId",
        )
        disc_kafka_azure_cosmos_db_sink.time_interval = AAZStrType(
            serialized_name="timeInterval",
        )
        disc_kafka_azure_cosmos_db_sink.topics = AAZListType()
        disc_kafka_azure_cosmos_db_sink.topics_dir = AAZStrType(
            serialized_name="topicsDir",
        )

        topics = _schema_connector_resource_read.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSink").topics
        topics.Element = AAZStrType()

        disc_kafka_azure_cosmos_db_source = _schema_connector_resource_read.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureCosmosDBSource")
        disc_kafka_azure_cosmos_db_source.api_key = AAZStrType(
            serialized_name="apiKey",
        )
        disc_kafka_azure_cosmos_db_source.api_secret = AAZStrType(
            serialized_name="apiSecret",
            flags={"secret": True},
        )
        disc_kafka_azure_cosmos_db_source.auth_type = AAZStrType(
            serialized_name="authType",
        )
        disc_kafka_azure_cosmos_db_source.input_format = AAZStrType(
            serialized_name="inputFormat",
        )
        disc_kafka_azure_cosmos_db_source.max_tasks = AAZStrType(
            serialized_name="maxTasks",
        )
        disc_kafka_azure_cosmos_db_source.output_format = AAZStrType(
            serialized_name="outputFormat",
        )
        disc_kafka_azure_cosmos_db_source.service_account_id = AAZStrType(
            serialized_name="serviceAccountId",
        )
        disc_kafka_azure_cosmos_db_source.topic_regex = AAZStrType(
            serialized_name="topicRegex",
        )
        disc_kafka_azure_cosmos_db_source.topics_dir = AAZStrType(
            serialized_name="topicsDir",
        )

        disc_kafka_azure_synapse_analytics_sink = _schema_connector_resource_read.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureSynapseAnalyticsSink")
        disc_kafka_azure_synapse_analytics_sink.api_key = AAZStrType(
            serialized_name="apiKey",
        )
        disc_kafka_azure_synapse_analytics_sink.api_secret = AAZStrType(
            serialized_name="apiSecret",
        )
        disc_kafka_azure_synapse_analytics_sink.auth_type = AAZStrType(
            serialized_name="authType",
        )
        disc_kafka_azure_synapse_analytics_sink.flush_size = AAZStrType(
            serialized_name="flushSize",
        )
        disc_kafka_azure_synapse_analytics_sink.input_format = AAZStrType(
            serialized_name="inputFormat",
        )
        disc_kafka_azure_synapse_analytics_sink.max_tasks = AAZStrType(
            serialized_name="maxTasks",
        )
        disc_kafka_azure_synapse_analytics_sink.output_format = AAZStrType(
            serialized_name="outputFormat",
        )
        disc_kafka_azure_synapse_analytics_sink.service_account_id = AAZStrType(
            serialized_name="serviceAccountId",
        )
        disc_kafka_azure_synapse_analytics_sink.time_interval = AAZStrType(
            serialized_name="timeInterval",
        )
        disc_kafka_azure_synapse_analytics_sink.topics = AAZListType()
        disc_kafka_azure_synapse_analytics_sink.topics_dir = AAZStrType(
            serialized_name="topicsDir",
        )

        topics = _schema_connector_resource_read.properties.partner_connector_info.discriminate_by("partner_connector_type", "KafkaAzureSynapseAnalyticsSink").topics
        topics.Element = AAZStrType()

        system_data = _schema_connector_resource_read.system_data
        system_data.created_at = AAZStrType(
            serialized_name="createdAt",
        )
        system_data.created_by = AAZStrType(
            serialized_name="createdBy",
        )
        system_data.created_by_type = AAZStrType(
            serialized_name="createdByType",
        )
        system_data.last_modified_at = AAZStrType(
            serialized_name="lastModifiedAt",
        )
        system_data.last_modified_by = AAZStrType(
            serialized_name="lastModifiedBy",
        )
        system_data.last_modified_by_type = AAZStrType(
            serialized_name="lastModifiedByType",
        )

        _schema.id = cls._schema_connector_resource_read.id
        _schema.name = cls._schema_connector_resource_read.name
        _schema.properties = cls._schema_connector_resource_read.properties
        _schema.system_data = cls._schema_connector_resource_read.system_data
        _schema.type = cls._schema_connector_resource_read.type


__all__ = ["Update"]
diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/__cmd_group.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/__cmd_group.py
new file mode 100644
index 00000000000..b46b0a18e78
--- /dev/null
+++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/__cmd_group.py
@@ -0,0 +1,23 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------

# pylint: skip-file
# flake8: noqa

from azure.cli.core.aaz import *


# Registers the "topic" sub-group under the cluster command group.
@register_command_group(
    "confluent organization environment cluster topic",
)
class __CMDGroup(AAZCommandGroup):
    """Manage Topic
    """
    pass


__all__ = ["__CMDGroup"]
diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/__init__.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/__init__.py
new file mode 100644
index 00000000000..c401f439385
--- /dev/null
+++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/__init__.py
@@ -0,0 +1,16 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------

# pylint: skip-file
# flake8: noqa

# Re-export the command group and all topic commands at package level.
from .__cmd_group import *
from ._create import *
from ._delete import *
from ._list import *
from ._show import *
from ._update import *
diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_create.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_create.py
new file mode 100644
index 00000000000..227660b176b
--- /dev/null
+++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_create.py
@@ -0,0 +1,390 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------

# pylint: skip-file
# flake8: noqa

from azure.cli.core.aaz import *


@register_command(
    "confluent organization environment cluster topic create",
)
class Create(AAZCommand):
    """Create confluent topics by Name
    """

    _aaz_info = {
        "version": "2024-07-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/topics/{}", "2024-07-01"],
        ]
    }

    def _handler(self, command_args):
        # Synchronous command: run the PUT operation and return the deserialized result.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    # Cached argument schema, built once per process.
    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.cluster_id = AAZStrArg(
            options=["--cluster-id"],
            help="Confluent kafka or schema registry cluster id",
            required=True,
        )
        _args_schema.environment_id = AAZStrArg(
            options=["--environment-id"],
            help="Confluent environment id",
            required=True,
        )
        _args_schema.organization_name = AAZStrArg(
            options=["--organization-name"],
            help="Organization resource name",
            required=True,
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.topic_name = AAZStrArg(
            options=["-n", "--name", "--topic-name"],
            help="Confluent kafka or schema registry topic name",
            required=True,
        )

        # define Arg Group "Properties"

        _args_schema = cls._args_schema
        _args_schema.configs = AAZObjectArg(
            options=["--configs"],
            arg_group="Properties",
            help="Config Specification of the topic",
        )
        cls._build_args_topics_related_link_create(_args_schema.configs)
        _args_schema.input_configs = AAZListArg(
            options=["--input-configs"],
            arg_group="Properties",
            help="Input Config Specification of the topic",
        )
        _args_schema.kind = AAZStrArg(
            options=["--kind"],
            arg_group="Properties",
            help="Type of topic",
        )
        _args_schema.metadata = AAZObjectArg(
            options=["--metadata"],
            arg_group="Properties",
            help="Metadata of the record",
        )
        _args_schema.partitions = AAZObjectArg(
            options=["--partitions"],
            arg_group="Properties",
            help="Partition Specification of the topic",
        )
        cls._build_args_topics_related_link_create(_args_schema.partitions)
        _args_schema.partitions_count = AAZStrArg(
            options=["--partitions-count"],
            arg_group="Properties",
            help="Partition count of the topic",
        )
        _args_schema.partitions_reassignments = AAZObjectArg(
            options=["--partitions-reassignments"],
            arg_group="Properties",
            help="Partition Reassignment Specification of the topic",
        )
        cls._build_args_topics_related_link_create(_args_schema.partitions_reassignments)
        _args_schema.replication_factor = AAZStrArg(
            options=["--replication-factor"],
            arg_group="Properties",
            help="Replication factor of the topic",
        )
        _args_schema.topic_id = AAZStrArg(
            options=["--topic-id"],
            arg_group="Properties",
            help="Topic Id returned by Confluent",
        )

        input_configs = cls._args_schema.input_configs
        input_configs.Element = AAZObjectArg()

        _element = cls._args_schema.input_configs.Element
        _element.name = AAZStrArg(
            options=["name"],
            help="Name of the topic input config",
        )
        _element.value = AAZStrArg(
            options=["value"],
            help="Value of the topic input config",
        )

        metadata = cls._args_schema.metadata
        metadata.resource_name = AAZStrArg(
            options=["resource-name"],
            help="Resource name of the record",
        )
        metadata.self = AAZStrArg(
            options=["self"],
            help="Self lookup url",
        )
        return cls._args_schema

    # Cached shared sub-schema ({"related": ...}) reused by configs, partitions
    # and partitionsReassignments arguments.
    _args_topics_related_link_create = None

    @classmethod
    def _build_args_topics_related_link_create(cls, _schema):
        if cls._args_topics_related_link_create is not None:
            _schema.related = cls._args_topics_related_link_create.related
            return

        cls._args_topics_related_link_create = AAZObjectArg()

        topics_related_link_create = cls._args_topics_related_link_create
        topics_related_link_create.related = AAZStrArg(
            options=["related"],
            help="Relationship of the topic",
        )

        _schema.related = cls._args_topics_related_link_create.related

    def _execute_operations(self):
        self.pre_operations()
        self.TopicsCreate(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class TopicsCreate(AAZHttpOperation):
        # PUT .../topics/{topicName}; accepts 200 or 201, no LRO.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200, 201]:
                return self.on_200_201(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/topics/{topicName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "PUT"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "clusterId", self.ctx.args.cluster_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "environmentId", self.ctx.args.environment_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "organizationName", self.ctx.args.organization_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "topicName", self.ctx.args.topic_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2024-07-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        @property
        def content(self):
            # Builds the request body from the command arguments.
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                typ=AAZObjectType,
                typ_kwargs={"flags": {"client_flatten": True}}
            )
            _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}})

            properties = _builder.get(".properties")
            if properties is not None:
                _CreateHelper._build_schema_topics_related_link_create(properties.set_prop("configs", AAZObjectType, ".configs"))
                properties.set_prop("inputConfigs", AAZListType, ".input_configs")
                properties.set_prop("kind", AAZStrType, ".kind")
                properties.set_prop("metadata", AAZObjectType, ".metadata")
                _CreateHelper._build_schema_topics_related_link_create(properties.set_prop("partitions", AAZObjectType, ".partitions"))
                properties.set_prop("partitionsCount", AAZStrType, ".partitions_count")
                _CreateHelper._build_schema_topics_related_link_create(properties.set_prop("partitionsReassignments", AAZObjectType, ".partitions_reassignments"))
                properties.set_prop("replicationFactor", AAZStrType, ".replication_factor")
                properties.set_prop("topicId", AAZStrType, ".topic_id")

            input_configs = _builder.get(".properties.inputConfigs")
            if input_configs is not None:
                input_configs.set_elements(AAZObjectType, ".")

            _elements = _builder.get(".properties.inputConfigs[]")
            if _elements is not None:
                _elements.set_prop("name", AAZStrType, ".name")
                _elements.set_prop("value", AAZStrType, ".value")

            metadata = _builder.get(".properties.metadata")
            if metadata is not None:
                metadata.set_prop("resourceName", AAZStrType, ".resource_name")
                metadata.set_prop("self", AAZStrType, ".self")

            return self.serialize_content(_content_value)

        def on_200_201(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200_201
            )

        # Cached response schema, built once per process.
        _schema_on_200_201 = None

        @classmethod
        def _build_schema_on_200_201(cls):
            if cls._schema_on_200_201 is not None:
                return cls._schema_on_200_201

            cls._schema_on_200_201 = AAZObjectType()

            _schema_on_200_201 = cls._schema_on_200_201
            _schema_on_200_201.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200_201.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200_201.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            _schema_on_200_201.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200_201.properties
            properties.configs = AAZObjectType()
            _CreateHelper._build_schema_topics_related_link_read(properties.configs)
            properties.input_configs = AAZListType(
                serialized_name="inputConfigs",
            )
            properties.kind = AAZStrType()
            properties.metadata = AAZObjectType()
            properties.partitions = AAZObjectType()
            _CreateHelper._build_schema_topics_related_link_read(properties.partitions)
            properties.partitions_count = AAZStrType(
                serialized_name="partitionsCount",
            )
            properties.partitions_reassignments = AAZObjectType(
                serialized_name="partitionsReassignments",
            )
            _CreateHelper._build_schema_topics_related_link_read(properties.partitions_reassignments)
            properties.replication_factor = AAZStrType(
                serialized_name="replicationFactor",
            )
            properties.topic_id = AAZStrType(
                serialized_name="topicId",
            )

            input_configs = cls._schema_on_200_201.properties.input_configs
            input_configs.Element = AAZObjectType()

            _element = cls._schema_on_200_201.properties.input_configs.Element
            _element.name = AAZStrType()
            _element.value = AAZStrType()

            metadata = cls._schema_on_200_201.properties.metadata
            metadata.resource_name = AAZStrType(
                serialized_name="resourceName",
            )
            metadata.self = AAZStrType()

            return cls._schema_on_200_201


class _CreateHelper:
    """Helper class for Create"""

    @classmethod
    def _build_schema_topics_related_link_create(cls, _builder):
        # Adds the shared {"related": ...} shape to a request-body builder node.
        if _builder is None:
            return
        _builder.set_prop("related", AAZStrType, ".related")

    # Cached shared read schema for the {"related": ...} shape.
    _schema_topics_related_link_read = None

    @classmethod
    def _build_schema_topics_related_link_read(cls, _schema):
        if cls._schema_topics_related_link_read is not None:
            _schema.related = cls._schema_topics_related_link_read.related
            return

        cls._schema_topics_related_link_read = _schema_topics_related_link_read = AAZObjectType()

        topics_related_link_read = _schema_topics_related_link_read
        topics_related_link_read.related = AAZStrType()

        _schema.related = cls._schema_topics_related_link_read.related


__all__ = ["Create"]
diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_delete.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_delete.py
new file mode 100644
index 00000000000..ba20f7b841e
--- /dev/null
+++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_delete.py
@@ -0,0 +1,190 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------

# pylint: skip-file
# flake8: noqa

from azure.cli.core.aaz import *


@register_command(
    "confluent organization environment cluster topic delete",
    confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete confluent topic by name
    """

    _aaz_info = {
        "version": "2024-07-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/topics/{}", "2024-07-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        # Long-running delete: returns an LRO poller with no final output (None).
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    # Cached argument schema, built once per process.
    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        # id_part values let the whole set of arguments be filled from a single
        # resource --ids value.
        _args_schema = cls._args_schema
        _args_schema.cluster_id = AAZStrArg(
            options=["--cluster-id"],
            help="Confluent kafka or schema registry cluster id",
            required=True,
            id_part="child_name_2",
        )
        _args_schema.environment_id = AAZStrArg(
            options=["--environment-id"],
            help="Confluent environment id",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.organization_name = AAZStrArg(
            options=["--organization-name"],
            help="Organization resource name",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.topic_name = AAZStrArg(
            options=["-n", "--name", "--topic-name"],
            help="Confluent kafka or schema registry topic name",
            required=True,
            id_part="child_name_3",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.TopicsDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class TopicsDelete(AAZHttpOperation):
        # DELETE .../topics/{topicName}; 200/201/202/204 all feed an LRO poller
        # whose final state is read from the Location header.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200_201,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200, 201]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200_201,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/topics/{topicName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "clusterId", self.ctx.args.cluster_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "environmentId", self.ctx.args.environment_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "organizationName", self.ctx.args.organization_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "topicName", self.ctx.args.topic_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2024-07-01",
                    required=True,
                ),
            }
            return parameters

        def on_204(self, session):
            pass

        def on_200_201(self, session):
            pass


class _DeleteHelper:
    """Helper class for Delete"""


__all__ = ["Delete"]
diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_list.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_list.py
new file mode 100644
index 00000000000..5ecd2167e3a
--- /dev/null
+++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_list.py
@@ -0,0 +1,267 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster topic list", +) +class List(AAZCommand): + """List of all the topics in a clusters + """ + + _aaz_info = { + "version": "2024-07-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/topics", "2024-07-01"], + ] + } + + AZ_SUPPORT_PAGINATION = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_paging(self._execute_operations, self._output) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + _args_schema.page_size = AAZIntArg( + options=["--page-size"], + help="Pagination size", + ) + _args_schema.page_token = AAZStrArg( + options=["--page-token"], + help="An opaque pagination token to fetch the next set of records", + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.TopicsList(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + 
@register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True) + next_link = self.deserialize_output(self.ctx.vars.instance.next_link) + return result, next_link + + class TopicsList(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/topics", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "pageSize", self.ctx.args.page_size, + ), + **self.serialize_query_param( + "pageToken", self.ctx.args.page_token, + ), + **self.serialize_query_param( + "api-version", "2024-07-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + 
**self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.next_link = AAZStrType( + serialized_name="nextLink", + ) + _schema_on_200.value = AAZListType() + + value = cls._schema_on_200.value + value.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _element.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200.value.Element.properties + properties.configs = AAZObjectType() + _ListHelper._build_schema_topics_related_link_read(properties.configs) + properties.input_configs = AAZListType( + serialized_name="inputConfigs", + ) + properties.kind = AAZStrType() + properties.metadata = AAZObjectType() + properties.partitions = AAZObjectType() + _ListHelper._build_schema_topics_related_link_read(properties.partitions) + properties.partitions_count = AAZStrType( + serialized_name="partitionsCount", + ) + properties.partitions_reassignments = AAZObjectType( + serialized_name="partitionsReassignments", + ) + _ListHelper._build_schema_topics_related_link_read(properties.partitions_reassignments) + properties.replication_factor = AAZStrType( + serialized_name="replicationFactor", + ) + properties.topic_id = AAZStrType( + serialized_name="topicId", + ) + + input_configs = cls._schema_on_200.value.Element.properties.input_configs + input_configs.Element = AAZObjectType() + + _element = 
cls._schema_on_200.value.Element.properties.input_configs.Element + _element.name = AAZStrType() + _element.value = AAZStrType() + + metadata = cls._schema_on_200.value.Element.properties.metadata + metadata.resource_name = AAZStrType( + serialized_name="resourceName", + ) + metadata.self = AAZStrType() + + return cls._schema_on_200 + + +class _ListHelper: + """Helper class for List""" + + _schema_topics_related_link_read = None + + @classmethod + def _build_schema_topics_related_link_read(cls, _schema): + if cls._schema_topics_related_link_read is not None: + _schema.related = cls._schema_topics_related_link_read.related + return + + cls._schema_topics_related_link_read = _schema_topics_related_link_read = AAZObjectType() + + topics_related_link_read = _schema_topics_related_link_read + topics_related_link_read.related = AAZStrType() + + _schema.related = cls._schema_topics_related_link_read.related + + +__all__ = ["List"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_show.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_show.py new file mode 100644 index 00000000000..e3d5a091beb --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_show.py @@ -0,0 +1,255 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster topic show", +) +class Show(AAZCommand): + """Get confluent topic by Name + """ + + _aaz_info = { + "version": "2024-07-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/topics/{}", "2024-07-01"], + ] + } + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + id_part="child_name_2", + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + id_part="child_name_1", + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + _args_schema.topic_name = AAZStrArg( + options=["-n", "--name", "--topic-name"], + help="Confluent kafka or schema registry topic name", + required=True, + id_part="child_name_3", + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.TopicsGet(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + 
pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class TopicsGet(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/topics/{topicName}", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + **self.serialize_url_param( + "topicName", self.ctx.args.topic_name, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-07-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + 
self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.id = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200.name = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _schema_on_200.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200.properties + properties.configs = AAZObjectType() + _ShowHelper._build_schema_topics_related_link_read(properties.configs) + properties.input_configs = AAZListType( + serialized_name="inputConfigs", + ) + properties.kind = AAZStrType() + properties.metadata = AAZObjectType() + properties.partitions = AAZObjectType() + _ShowHelper._build_schema_topics_related_link_read(properties.partitions) + properties.partitions_count = AAZStrType( + serialized_name="partitionsCount", + ) + properties.partitions_reassignments = AAZObjectType( + serialized_name="partitionsReassignments", + ) + _ShowHelper._build_schema_topics_related_link_read(properties.partitions_reassignments) + properties.replication_factor = AAZStrType( + serialized_name="replicationFactor", + ) + properties.topic_id = AAZStrType( + serialized_name="topicId", + ) + + input_configs = cls._schema_on_200.properties.input_configs + input_configs.Element = AAZObjectType() + + _element = cls._schema_on_200.properties.input_configs.Element + _element.name = AAZStrType() + _element.value = AAZStrType() + + metadata = cls._schema_on_200.properties.metadata + metadata.resource_name = AAZStrType( + serialized_name="resourceName", + ) + metadata.self = AAZStrType() + + return cls._schema_on_200 + + +class _ShowHelper: + """Helper class for Show""" + + _schema_topics_related_link_read = None + + 
@classmethod + def _build_schema_topics_related_link_read(cls, _schema): + if cls._schema_topics_related_link_read is not None: + _schema.related = cls._schema_topics_related_link_read.related + return + + cls._schema_topics_related_link_read = _schema_topics_related_link_read = AAZObjectType() + + topics_related_link_read = _schema_topics_related_link_read + topics_related_link_read.related = AAZStrType() + + _schema.related = cls._schema_topics_related_link_read.related + + +__all__ = ["Show"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_update.py b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_update.py new file mode 100644 index 00000000000..71fade572c6 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/organization/environment/cluster/topic/_update.py @@ -0,0 +1,562 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent organization environment cluster topic update", +) +class Update(AAZCommand): + """Update confluent topics by Name + """ + + _aaz_info = { + "version": "2024-07-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/organizations/{}/environments/{}/clusters/{}/topics/{}", "2024-07-01"], + ] + } + + AZ_SUPPORT_GENERIC_UPDATE = True + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.cluster_id = AAZStrArg( + options=["--cluster-id"], + help="Confluent kafka or schema registry cluster id", + required=True, + id_part="child_name_2", + ) + _args_schema.environment_id = AAZStrArg( + options=["--environment-id"], + help="Confluent environment id", + required=True, + id_part="child_name_1", + ) + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + _args_schema.topic_name = AAZStrArg( + options=["-n", "--name", "--topic-name"], + help="Confluent kafka or schema registry topic name", + required=True, + id_part="child_name_3", + ) + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.configs = AAZObjectArg( + options=["--configs"], + arg_group="Properties", + help="Config Specification of the topic", + 
nullable=True, + ) + cls._build_args_topics_related_link_update(_args_schema.configs) + _args_schema.input_configs = AAZListArg( + options=["--input-configs"], + arg_group="Properties", + help="Input Config Specification of the topic", + nullable=True, + ) + _args_schema.kind = AAZStrArg( + options=["--kind"], + arg_group="Properties", + help="Type of topic", + nullable=True, + ) + _args_schema.metadata = AAZObjectArg( + options=["--metadata"], + arg_group="Properties", + help="Metadata of the record", + nullable=True, + ) + _args_schema.partitions = AAZObjectArg( + options=["--partitions"], + arg_group="Properties", + help="Partition Specification of the topic", + nullable=True, + ) + cls._build_args_topics_related_link_update(_args_schema.partitions) + _args_schema.partitions_count = AAZStrArg( + options=["--partitions-count"], + arg_group="Properties", + help="Partition count of the topic", + nullable=True, + ) + _args_schema.partitions_reassignments = AAZObjectArg( + options=["--partitions-reassignments"], + arg_group="Properties", + help="Partition Reassignment Specification of the topic", + nullable=True, + ) + cls._build_args_topics_related_link_update(_args_schema.partitions_reassignments) + _args_schema.replication_factor = AAZStrArg( + options=["--replication-factor"], + arg_group="Properties", + help="Replication factor of the topic", + nullable=True, + ) + _args_schema.topic_id = AAZStrArg( + options=["--topic-id"], + arg_group="Properties", + help="Topic Id returned by Confluent", + nullable=True, + ) + + input_configs = cls._args_schema.input_configs + input_configs.Element = AAZObjectArg( + nullable=True, + ) + + _element = cls._args_schema.input_configs.Element + _element.name = AAZStrArg( + options=["name"], + help="Name of the topic input config", + nullable=True, + ) + _element.value = AAZStrArg( + options=["value"], + help="Value of the topic input config", + nullable=True, + ) + + metadata = cls._args_schema.metadata + metadata.resource_name = 
AAZStrArg( + options=["resource-name"], + help="Resource name of the record", + nullable=True, + ) + metadata.self = AAZStrArg( + options=["self"], + help="Self lookup url", + nullable=True, + ) + return cls._args_schema + + _args_topics_related_link_update = None + + @classmethod + def _build_args_topics_related_link_update(cls, _schema): + if cls._args_topics_related_link_update is not None: + _schema.related = cls._args_topics_related_link_update.related + return + + cls._args_topics_related_link_update = AAZObjectArg( + nullable=True, + ) + + topics_related_link_update = cls._args_topics_related_link_update + topics_related_link_update.related = AAZStrArg( + options=["related"], + help="Relationship of the topic", + nullable=True, + ) + + _schema.related = cls._args_topics_related_link_update.related + + def _execute_operations(self): + self.pre_operations() + self.TopicsGet(ctx=self.ctx)() + self.pre_instance_update(self.ctx.vars.instance) + self.InstanceUpdateByJson(ctx=self.ctx)() + self.InstanceUpdateByGeneric(ctx=self.ctx)() + self.post_instance_update(self.ctx.vars.instance) + self.TopicsCreate(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + @register_callback + def pre_instance_update(self, instance): + pass + + @register_callback + def post_instance_update(self, instance): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class TopicsGet(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/topics/{topicName}", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + **self.serialize_url_param( + "topicName", self.ctx.args.topic_name, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-07-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + _UpdateHelper._build_schema_topic_record_read(cls._schema_on_200) + + return cls._schema_on_200 + + class TopicsCreate(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, 
stream=False, **kwargs) + if session.http_response.status_code in [200, 201]: + return self.on_200_201(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/organizations/{organizationName}/environments/{environmentId}/clusters/{clusterId}/topics/{topicName}", + **self.url_parameters + ) + + @property + def method(self): + return "PUT" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "clusterId", self.ctx.args.cluster_id, + required=True, + ), + **self.serialize_url_param( + "environmentId", self.ctx.args.environment_id, + required=True, + ), + **self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + **self.serialize_url_param( + "topicName", self.ctx.args.topic_name, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-07-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + value=self.ctx.vars.instance, + ) + + return self.serialize_content(_content_value) + + def on_200_201(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + 
schema_builder=self._build_schema_on_200_201 + ) + + _schema_on_200_201 = None + + @classmethod + def _build_schema_on_200_201(cls): + if cls._schema_on_200_201 is not None: + return cls._schema_on_200_201 + + cls._schema_on_200_201 = AAZObjectType() + _UpdateHelper._build_schema_topic_record_read(cls._schema_on_200_201) + + return cls._schema_on_200_201 + + class InstanceUpdateByJson(AAZJsonInstanceUpdateOperation): + + def __call__(self, *args, **kwargs): + self._update_instance(self.ctx.vars.instance) + + def _update_instance(self, instance): + _instance_value, _builder = self.new_content_builder( + self.ctx.args, + value=instance, + typ=AAZObjectType + ) + _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}}) + + properties = _builder.get(".properties") + if properties is not None: + _UpdateHelper._build_schema_topics_related_link_update(properties.set_prop("configs", AAZObjectType, ".configs")) + properties.set_prop("inputConfigs", AAZListType, ".input_configs") + properties.set_prop("kind", AAZStrType, ".kind") + properties.set_prop("metadata", AAZObjectType, ".metadata") + _UpdateHelper._build_schema_topics_related_link_update(properties.set_prop("partitions", AAZObjectType, ".partitions")) + properties.set_prop("partitionsCount", AAZStrType, ".partitions_count") + _UpdateHelper._build_schema_topics_related_link_update(properties.set_prop("partitionsReassignments", AAZObjectType, ".partitions_reassignments")) + properties.set_prop("replicationFactor", AAZStrType, ".replication_factor") + properties.set_prop("topicId", AAZStrType, ".topic_id") + + input_configs = _builder.get(".properties.inputConfigs") + if input_configs is not None: + input_configs.set_elements(AAZObjectType, ".") + + _elements = _builder.get(".properties.inputConfigs[]") + if _elements is not None: + _elements.set_prop("name", AAZStrType, ".name") + _elements.set_prop("value", AAZStrType, ".value") + + metadata = _builder.get(".properties.metadata") 
+ if metadata is not None: + metadata.set_prop("resourceName", AAZStrType, ".resource_name") + metadata.set_prop("self", AAZStrType, ".self") + + return _instance_value + + class InstanceUpdateByGeneric(AAZGenericInstanceUpdateOperation): + + def __call__(self, *args, **kwargs): + self._update_instance_by_generic( + self.ctx.vars.instance, + self.ctx.generic_update_args + ) + + +class _UpdateHelper: + """Helper class for Update""" + + @classmethod + def _build_schema_topics_related_link_update(cls, _builder): + if _builder is None: + return + _builder.set_prop("related", AAZStrType, ".related") + + _schema_topic_record_read = None + + @classmethod + def _build_schema_topic_record_read(cls, _schema): + if cls._schema_topic_record_read is not None: + _schema.id = cls._schema_topic_record_read.id + _schema.name = cls._schema_topic_record_read.name + _schema.properties = cls._schema_topic_record_read.properties + _schema.type = cls._schema_topic_record_read.type + return + + cls._schema_topic_record_read = _schema_topic_record_read = AAZObjectType() + + topic_record_read = _schema_topic_record_read + topic_record_read.id = AAZStrType( + flags={"read_only": True}, + ) + topic_record_read.name = AAZStrType( + flags={"read_only": True}, + ) + topic_record_read.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + topic_record_read.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = _schema_topic_record_read.properties + properties.configs = AAZObjectType() + cls._build_schema_topics_related_link_read(properties.configs) + properties.input_configs = AAZListType( + serialized_name="inputConfigs", + ) + properties.kind = AAZStrType() + properties.metadata = AAZObjectType() + properties.partitions = AAZObjectType() + cls._build_schema_topics_related_link_read(properties.partitions) + properties.partitions_count = AAZStrType( + serialized_name="partitionsCount", + ) + properties.partitions_reassignments = AAZObjectType( + 
serialized_name="partitionsReassignments", + ) + cls._build_schema_topics_related_link_read(properties.partitions_reassignments) + properties.replication_factor = AAZStrType( + serialized_name="replicationFactor", + ) + properties.topic_id = AAZStrType( + serialized_name="topicId", + ) + + input_configs = _schema_topic_record_read.properties.input_configs + input_configs.Element = AAZObjectType() + + _element = _schema_topic_record_read.properties.input_configs.Element + _element.name = AAZStrType() + _element.value = AAZStrType() + + metadata = _schema_topic_record_read.properties.metadata + metadata.resource_name = AAZStrType( + serialized_name="resourceName", + ) + metadata.self = AAZStrType() + + _schema.id = cls._schema_topic_record_read.id + _schema.name = cls._schema_topic_record_read.name + _schema.properties = cls._schema_topic_record_read.properties + _schema.type = cls._schema_topic_record_read.type + + _schema_topics_related_link_read = None + + @classmethod + def _build_schema_topics_related_link_read(cls, _schema): + if cls._schema_topics_related_link_read is not None: + _schema.related = cls._schema_topics_related_link_read.related + return + + cls._schema_topics_related_link_read = _schema_topics_related_link_read = AAZObjectType() + + topics_related_link_read = _schema_topics_related_link_read + topics_related_link_read.related = AAZStrType() + + _schema.related = cls._schema_topics_related_link_read.related + + +__all__ = ["Update"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/validation/__cmd_group.py b/src/confluent/azext_confluent/aaz/latest/confluent/validation/__cmd_group.py new file mode 100644 index 00000000000..e2a1e355487 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/validation/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "confluent validation", +) +class __CMDGroup(AAZCommandGroup): + """Manage Validation + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/validation/__init__.py b/src/confluent/azext_confluent/aaz/latest/confluent/validation/__init__.py new file mode 100644 index 00000000000..6d3edf0ca3a --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/validation/__init__.py @@ -0,0 +1,13 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * +from ._orgvalidate import * +from ._orgvalidate_v2 import * diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/validation/_orgvalidate.py b/src/confluent/azext_confluent/aaz/latest/confluent/validation/_orgvalidate.py new file mode 100644 index 00000000000..3a9fc0c3b99 --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/validation/_orgvalidate.py @@ -0,0 +1,481 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent validation orgvalidate", +) +class Orgvalidate(AAZCommand): + """Organization Validate proxy resource + """ + + _aaz_info = { + "version": "2024-02-13", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/validations/{}/orgvalidate", "2024-02-13"], + ] + } + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + + # define Arg Group "Body" + + _args_schema = cls._args_schema + _args_schema.location = AAZResourceLocationArg( + arg_group="Body", + help="Location of Organization resource", + fmt=AAZResourceLocationArgFormat( + resource_group_arg="resource_group", + ), + ) + _args_schema.tags = AAZDictArg( + options=["--tags"], + arg_group="Body", + help="Organization resource tags", + ) + + tags = cls._args_schema.tags + tags.Element = AAZStrArg() + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.link_organization = AAZObjectArg( + options=["--link-organization"], + arg_group="Properties", + help="Link an existing Confluent organization", + ) + _args_schema.offer_detail = AAZObjectArg( + options=["--offer-detail"], + arg_group="Properties", + help="Confluent 
offer detail", + required=True, + ) + _args_schema.user_detail = AAZObjectArg( + options=["--user-detail"], + arg_group="Properties", + help="Subscriber detail", + required=True, + ) + + link_organization = cls._args_schema.link_organization + link_organization.token = AAZStrArg( + options=["token"], + help="User auth token", + required=True, + ) + + offer_detail = cls._args_schema.offer_detail + offer_detail.id = AAZStrArg( + options=["id"], + help="Offer Id", + required=True, + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + offer_detail.plan_id = AAZStrArg( + options=["plan-id"], + help="Offer Plan Id", + required=True, + fmt=AAZStrArgFormat( + max_length=200, + ), + ) + offer_detail.plan_name = AAZStrArg( + options=["plan-name"], + help="Offer Plan Name", + required=True, + fmt=AAZStrArgFormat( + max_length=200, + ), + ) + offer_detail.private_offer_id = AAZStrArg( + options=["private-offer-id"], + help="Private Offer Id", + fmt=AAZStrArgFormat( + max_length=255, + ), + ) + offer_detail.private_offer_ids = AAZListArg( + options=["private-offer-ids"], + help="Array of Private Offer Ids", + ) + offer_detail.publisher_id = AAZStrArg( + options=["publisher-id"], + help="Publisher Id", + required=True, + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + offer_detail.status = AAZStrArg( + options=["status"], + help="SaaS Offer Status", + enum={"Failed": "Failed", "InProgress": "InProgress", "PendingFulfillmentStart": "PendingFulfillmentStart", "Reinstated": "Reinstated", "Started": "Started", "Subscribed": "Subscribed", "Succeeded": "Succeeded", "Suspended": "Suspended", "Unsubscribed": "Unsubscribed", "Updating": "Updating"}, + ) + offer_detail.term_id = AAZStrArg( + options=["term-id"], + help="Offer Plan Term Id", + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + offer_detail.term_unit = AAZStrArg( + options=["term-unit"], + help="Offer Plan Term unit", + required=True, + fmt=AAZStrArgFormat( + max_length=25, + ), + ) + + private_offer_ids = 
cls._args_schema.offer_detail.private_offer_ids + private_offer_ids.Element = AAZStrArg() + + user_detail = cls._args_schema.user_detail + user_detail.aad_email = AAZStrArg( + options=["aad-email"], + help="AAD email address", + ) + user_detail.email_address = AAZStrArg( + options=["email-address"], + help="Email address", + required=True, + fmt=AAZStrArgFormat( + pattern="^\\S+@\\S+\\.\\S+$", + ), + ) + user_detail.first_name = AAZStrArg( + options=["first-name"], + help="First name", + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + user_detail.last_name = AAZStrArg( + options=["last-name"], + help="Last name", + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + user_detail.user_principal_name = AAZStrArg( + options=["user-principal-name"], + help="User principal name", + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.ValidationsValidateOrganization(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class ValidationsValidateOrganization(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/validations/{organizationName}/orgvalidate", + **self.url_parameters + ) + + @property + def method(self): + return "POST" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + 
**self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-02-13", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + typ=AAZObjectType, + typ_kwargs={"flags": {"required": True, "client_flatten": True}} + ) + _builder.set_prop("location", AAZStrType, ".location") + _builder.set_prop("properties", AAZObjectType, ".", typ_kwargs={"flags": {"required": True, "client_flatten": True}}) + _builder.set_prop("tags", AAZDictType, ".tags") + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("linkOrganization", AAZObjectType, ".link_organization") + properties.set_prop("offerDetail", AAZObjectType, ".offer_detail", typ_kwargs={"flags": {"required": True}}) + properties.set_prop("userDetail", AAZObjectType, ".user_detail", typ_kwargs={"flags": {"required": True}}) + + link_organization = _builder.get(".properties.linkOrganization") + if link_organization is not None: + link_organization.set_prop("token", AAZStrType, ".token", typ_kwargs={"flags": {"secret": True}}) + + offer_detail = _builder.get(".properties.offerDetail") + if offer_detail is not None: + offer_detail.set_prop("id", AAZStrType, ".id", typ_kwargs={"flags": {"required": True}}) + offer_detail.set_prop("planId", AAZStrType, ".plan_id", typ_kwargs={"flags": 
{"required": True}}) + offer_detail.set_prop("planName", AAZStrType, ".plan_name", typ_kwargs={"flags": {"required": True}}) + offer_detail.set_prop("privateOfferId", AAZStrType, ".private_offer_id") + offer_detail.set_prop("privateOfferIds", AAZListType, ".private_offer_ids") + offer_detail.set_prop("publisherId", AAZStrType, ".publisher_id", typ_kwargs={"flags": {"required": True}}) + offer_detail.set_prop("status", AAZStrType, ".status") + offer_detail.set_prop("termId", AAZStrType, ".term_id") + offer_detail.set_prop("termUnit", AAZStrType, ".term_unit", typ_kwargs={"flags": {"required": True}}) + + private_offer_ids = _builder.get(".properties.offerDetail.privateOfferIds") + if private_offer_ids is not None: + private_offer_ids.set_elements(AAZStrType, ".") + + user_detail = _builder.get(".properties.userDetail") + if user_detail is not None: + user_detail.set_prop("aadEmail", AAZStrType, ".aad_email") + user_detail.set_prop("emailAddress", AAZStrType, ".email_address", typ_kwargs={"flags": {"required": True}}) + user_detail.set_prop("firstName", AAZStrType, ".first_name") + user_detail.set_prop("lastName", AAZStrType, ".last_name") + user_detail.set_prop("userPrincipalName", AAZStrType, ".user_principal_name") + + tags = _builder.get(".tags") + if tags is not None: + tags.set_elements(AAZStrType, ".") + + return self.serialize_content(_content_value) + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.id = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200.location = AAZStrType() + _schema_on_200.name = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200.properties = AAZObjectType( + 
flags={"required": True, "client_flatten": True}, + ) + _schema_on_200.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + _schema_on_200.tags = AAZDictType() + _schema_on_200.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200.properties + properties.created_time = AAZStrType( + serialized_name="createdTime", + flags={"read_only": True}, + ) + properties.offer_detail = AAZObjectType( + serialized_name="offerDetail", + flags={"required": True}, + ) + properties.organization_id = AAZStrType( + serialized_name="organizationId", + flags={"read_only": True}, + ) + properties.provisioning_state = AAZStrType( + serialized_name="provisioningState", + flags={"read_only": True}, + ) + properties.sso_url = AAZStrType( + serialized_name="ssoUrl", + flags={"read_only": True}, + ) + properties.user_detail = AAZObjectType( + serialized_name="userDetail", + flags={"required": True}, + ) + + offer_detail = cls._schema_on_200.properties.offer_detail + offer_detail.id = AAZStrType( + flags={"required": True}, + ) + offer_detail.plan_id = AAZStrType( + serialized_name="planId", + flags={"required": True}, + ) + offer_detail.plan_name = AAZStrType( + serialized_name="planName", + flags={"required": True}, + ) + offer_detail.private_offer_id = AAZStrType( + serialized_name="privateOfferId", + ) + offer_detail.private_offer_ids = AAZListType( + serialized_name="privateOfferIds", + ) + offer_detail.publisher_id = AAZStrType( + serialized_name="publisherId", + flags={"required": True}, + ) + offer_detail.status = AAZStrType() + offer_detail.term_id = AAZStrType( + serialized_name="termId", + ) + offer_detail.term_unit = AAZStrType( + serialized_name="termUnit", + flags={"required": True}, + ) + + private_offer_ids = cls._schema_on_200.properties.offer_detail.private_offer_ids + private_offer_ids.Element = AAZStrType() + + user_detail = cls._schema_on_200.properties.user_detail + user_detail.aad_email = 
AAZStrType( + serialized_name="aadEmail", + ) + user_detail.email_address = AAZStrType( + serialized_name="emailAddress", + flags={"required": True}, + ) + user_detail.first_name = AAZStrType( + serialized_name="firstName", + ) + user_detail.last_name = AAZStrType( + serialized_name="lastName", + ) + user_detail.user_principal_name = AAZStrType( + serialized_name="userPrincipalName", + ) + + system_data = cls._schema_on_200.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + tags = cls._schema_on_200.tags + tags.Element = AAZStrType() + + return cls._schema_on_200 + + +class _OrgvalidateHelper: + """Helper class for Orgvalidate""" + + +__all__ = ["Orgvalidate"] diff --git a/src/confluent/azext_confluent/aaz/latest/confluent/validation/_orgvalidate_v2.py b/src/confluent/azext_confluent/aaz/latest/confluent/validation/_orgvalidate_v2.py new file mode 100644 index 00000000000..195002d207b --- /dev/null +++ b/src/confluent/azext_confluent/aaz/latest/confluent/validation/_orgvalidate_v2.py @@ -0,0 +1,366 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "confluent validation orgvalidate-v2", +) +class OrgvalidateV2(AAZCommand): + """Validate Confluent organization resource + """ + + _aaz_info = { + "version": "2024-02-13", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.confluent/validations/{}/orgvalidatev2", "2024-02-13"], + ] + } + + def _handler(self, command_args): + super()._handler(command_args) + self._execute_operations() + return self._output() + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.organization_name = AAZStrArg( + options=["--organization-name"], + help="Organization resource name", + required=True, + id_part="name", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + + # define Arg Group "Body" + + _args_schema = cls._args_schema + _args_schema.location = AAZResourceLocationArg( + arg_group="Body", + help="Location of Organization resource", + fmt=AAZResourceLocationArgFormat( + resource_group_arg="resource_group", + ), + ) + _args_schema.tags = AAZDictArg( + options=["--tags"], + arg_group="Body", + help="Organization resource tags", + ) + + tags = cls._args_schema.tags + tags.Element = AAZStrArg() + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.link_organization = AAZObjectArg( + options=["--link-organization"], + arg_group="Properties", + help="Link an existing Confluent organization", + ) + _args_schema.offer_detail = AAZObjectArg( + options=["--offer-detail"], + arg_group="Properties", + 
help="Confluent offer detail", + required=True, + ) + _args_schema.user_detail = AAZObjectArg( + options=["--user-detail"], + arg_group="Properties", + help="Subscriber detail", + required=True, + ) + + link_organization = cls._args_schema.link_organization + link_organization.token = AAZStrArg( + options=["token"], + help="User auth token", + required=True, + ) + + offer_detail = cls._args_schema.offer_detail + offer_detail.id = AAZStrArg( + options=["id"], + help="Offer Id", + required=True, + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + offer_detail.plan_id = AAZStrArg( + options=["plan-id"], + help="Offer Plan Id", + required=True, + fmt=AAZStrArgFormat( + max_length=200, + ), + ) + offer_detail.plan_name = AAZStrArg( + options=["plan-name"], + help="Offer Plan Name", + required=True, + fmt=AAZStrArgFormat( + max_length=200, + ), + ) + offer_detail.private_offer_id = AAZStrArg( + options=["private-offer-id"], + help="Private Offer Id", + fmt=AAZStrArgFormat( + max_length=255, + ), + ) + offer_detail.private_offer_ids = AAZListArg( + options=["private-offer-ids"], + help="Array of Private Offer Ids", + ) + offer_detail.publisher_id = AAZStrArg( + options=["publisher-id"], + help="Publisher Id", + required=True, + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + offer_detail.status = AAZStrArg( + options=["status"], + help="SaaS Offer Status", + enum={"Failed": "Failed", "InProgress": "InProgress", "PendingFulfillmentStart": "PendingFulfillmentStart", "Reinstated": "Reinstated", "Started": "Started", "Subscribed": "Subscribed", "Succeeded": "Succeeded", "Suspended": "Suspended", "Unsubscribed": "Unsubscribed", "Updating": "Updating"}, + ) + offer_detail.term_id = AAZStrArg( + options=["term-id"], + help="Offer Plan Term Id", + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + offer_detail.term_unit = AAZStrArg( + options=["term-unit"], + help="Offer Plan Term unit", + required=True, + fmt=AAZStrArgFormat( + max_length=25, + ), + ) + + private_offer_ids = 
cls._args_schema.offer_detail.private_offer_ids + private_offer_ids.Element = AAZStrArg() + + user_detail = cls._args_schema.user_detail + user_detail.aad_email = AAZStrArg( + options=["aad-email"], + help="AAD email address", + ) + user_detail.email_address = AAZStrArg( + options=["email-address"], + help="Email address", + required=True, + fmt=AAZStrArgFormat( + pattern="^\\S+@\\S+\\.\\S+$", + ), + ) + user_detail.first_name = AAZStrArg( + options=["first-name"], + help="First name", + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + user_detail.last_name = AAZStrArg( + options=["last-name"], + help="Last name", + fmt=AAZStrArgFormat( + max_length=50, + ), + ) + user_detail.user_principal_name = AAZStrArg( + options=["user-principal-name"], + help="User principal name", + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.ValidationsValidateOrganizationV2(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class ValidationsValidateOrganizationV2(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Confluent/validations/{organizationName}/orgvalidateV2", + **self.url_parameters + ) + + @property + def method(self): + return "POST" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + 
**self.serialize_url_param( + "organizationName", self.ctx.args.organization_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-02-13", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + typ=AAZObjectType, + typ_kwargs={"flags": {"required": True, "client_flatten": True}} + ) + _builder.set_prop("location", AAZStrType, ".location") + _builder.set_prop("properties", AAZObjectType, ".", typ_kwargs={"flags": {"required": True, "client_flatten": True}}) + _builder.set_prop("tags", AAZDictType, ".tags") + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("linkOrganization", AAZObjectType, ".link_organization") + properties.set_prop("offerDetail", AAZObjectType, ".offer_detail", typ_kwargs={"flags": {"required": True}}) + properties.set_prop("userDetail", AAZObjectType, ".user_detail", typ_kwargs={"flags": {"required": True}}) + + link_organization = _builder.get(".properties.linkOrganization") + if link_organization is not None: + link_organization.set_prop("token", AAZStrType, ".token", typ_kwargs={"flags": {"secret": True}}) + + offer_detail = _builder.get(".properties.offerDetail") + if offer_detail is not None: + offer_detail.set_prop("id", AAZStrType, ".id", typ_kwargs={"flags": {"required": True}}) + offer_detail.set_prop("planId", AAZStrType, ".plan_id", typ_kwargs={"flags": 
{"required": True}}) + offer_detail.set_prop("planName", AAZStrType, ".plan_name", typ_kwargs={"flags": {"required": True}}) + offer_detail.set_prop("privateOfferId", AAZStrType, ".private_offer_id") + offer_detail.set_prop("privateOfferIds", AAZListType, ".private_offer_ids") + offer_detail.set_prop("publisherId", AAZStrType, ".publisher_id", typ_kwargs={"flags": {"required": True}}) + offer_detail.set_prop("status", AAZStrType, ".status") + offer_detail.set_prop("termId", AAZStrType, ".term_id") + offer_detail.set_prop("termUnit", AAZStrType, ".term_unit", typ_kwargs={"flags": {"required": True}}) + + private_offer_ids = _builder.get(".properties.offerDetail.privateOfferIds") + if private_offer_ids is not None: + private_offer_ids.set_elements(AAZStrType, ".") + + user_detail = _builder.get(".properties.userDetail") + if user_detail is not None: + user_detail.set_prop("aadEmail", AAZStrType, ".aad_email") + user_detail.set_prop("emailAddress", AAZStrType, ".email_address", typ_kwargs={"flags": {"required": True}}) + user_detail.set_prop("firstName", AAZStrType, ".first_name") + user_detail.set_prop("lastName", AAZStrType, ".last_name") + user_detail.set_prop("userPrincipalName", AAZStrType, ".user_principal_name") + + tags = _builder.get(".tags") + if tags is not None: + tags.set_elements(AAZStrType, ".") + + return self.serialize_content(_content_value) + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.info = AAZDictType() + + info = cls._schema_on_200.info + info.Element = AAZStrType() + + return cls._schema_on_200 + + +class _OrgvalidateV2Helper: + """Helper class for OrgvalidateV2""" + + +__all__ = 
["OrgvalidateV2"] diff --git a/src/confluent/azext_confluent/azext_metadata.json b/src/confluent/azext_confluent/azext_metadata.json index b9c3b873766..06012c7c942 100644 --- a/src/confluent/azext_confluent/azext_metadata.json +++ b/src/confluent/azext_confluent/azext_metadata.json @@ -1,3 +1,3 @@ { - "azext.minCliCoreVersion": "2.70.0" + "azext.minCliCoreVersion": "2.75.0" } \ No newline at end of file diff --git a/src/confluent/azext_confluent/generated/_help.py b/src/confluent/azext_confluent/generated/_help.py index 758309e4f7b..7e6df3300d7 100644 --- a/src/confluent/azext_confluent/generated/_help.py +++ b/src/confluent/azext_confluent/generated/_help.py @@ -93,3 +93,121 @@ text: |- az confluent organization wait --name "myOrganization" --resource-group "myResourceGroup" --deleted """ + +helps['confluent agreement default create'] = """ + type: command + short-summary: "Create Confluent Marketplace agreement in the subscription." + examples: + - name: Create agreement + text: |- + az confluent agreement default create --accepted true +""" + +helps['confluent organization environment create'] = """ + type: command + short-summary: "Create confluent environment." + examples: + - name: Create environment + text: |- + az confluent organization environment create --organization-name myOrganization \ +--resource-group myResourceGroup --name env-abc123 --kind Standard +""" + +helps['confluent organization environment update'] = """ + type: command + short-summary: "Update confluent environment." + examples: + - name: Update environment + text: |- + az confluent organization environment update --organization-name myOrganization \ +--resource-group myResourceGroup --name env-abc123 --stream-governance-config package=ESSENTIALS +""" + +helps['confluent organization environment cluster create'] = """ + type: command + short-summary: "Create confluent clusters." 
+ examples: + - name: Create cluster + text: |- + az confluent organization environment cluster create --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --name lkc-abc123 --kind Kafka +""" + +helps['confluent organization environment cluster update'] = """ + type: command + short-summary: "Update confluent clusters." + examples: + - name: Update cluster + text: |- + az confluent organization environment cluster update --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --name lkc-abc123 \ +--spec availability=SINGLE_ZONE cloud=Azure +""" + +helps['confluent organization environment cluster connector create'] = """ + type: command + short-summary: "Create confluent connector." + examples: + - name: Create connector + text: |- + az confluent organization environment cluster connector create --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --cluster-id lkc-abc123 --name my-connector \ +--connector-basic-info connector-type=SINK connector-class=AZUREBLOBSINK +""" + +helps['confluent organization environment cluster connector update'] = """ + type: command + short-summary: "Update confluent connector." + examples: + - name: Update connector + text: |- + az confluent organization environment cluster connector update --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --cluster-id lkc-abc123 --name my-connector \ +--connector-basic-info connector-state=PAUSED +""" + +helps['confluent organization environment cluster topic create'] = """ + type: command + short-summary: "Create confluent topic." 
+ examples: + - name: Create topic + text: |- + az confluent organization environment cluster topic create --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --cluster-id lkc-abc123 --name my-topic \ +--partitions-count 6 +""" + +helps['confluent organization environment cluster topic update'] = """ + type: command + short-summary: "Update confluent topic." + examples: + - name: Update topic + text: |- + az confluent organization environment cluster topic update --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --cluster-id lkc-abc123 --name my-topic \ +--input-configs '[{name:retention.ms,value:604800000}]' +""" + +helps['confluent validation orgvalidate'] = """ + type: command + short-summary: "Organization Validate proxy resource." + examples: + - name: Validate organization + text: |- + az confluent validation orgvalidate --organization-name myOrganization \ +--resource-group myResourceGroup \ +--offer-detail id="confluent-cloud-azure-prod" plan-id="confluent-cloud-azure-payg-prod" plan-name="Confluent Cloud - Pay as you Go" publisher-id="confluentinc" term-unit="P1M" \ +--user-detail email-address="user@example.com" +""" + +helps['confluent validation orgvalidate-v2'] = """ + type: command + short-summary: "Validate Confluent organization resource." 
+ examples: + - name: Validate organization (v2) + text: |- + az confluent validation orgvalidate-v2 --organization-name myOrganization \ +--resource-group myResourceGroup \ +--offer-detail id="confluent-cloud-azure-prod" plan-id="confluent-cloud-azure-payg-prod" plan-name="Confluent Cloud - Pay as you Go" publisher-id="confluentinc" term-unit="P1M" \ +--user-detail email-address="user@example.com" +""" diff --git a/src/confluent/azext_confluent/manual/_help.py b/src/confluent/azext_confluent/manual/_help.py index 5add413bd63..4617f743f6b 100644 --- a/src/confluent/azext_confluent/manual/_help.py +++ b/src/confluent/azext_confluent/manual/_help.py @@ -78,3 +78,121 @@ text: |- az confluent offer-detail show """ + +helps['confluent agreement default create'] = """ + type: command + short-summary: "Create Confluent Marketplace agreement in the subscription." + examples: + - name: Create agreement + text: |- + az confluent agreement default create --accepted true +""" + +helps['confluent organization environment create'] = """ + type: command + short-summary: "Create confluent environment." + examples: + - name: Create environment + text: |- + az confluent organization environment create --organization-name myOrganization \ +--resource-group myResourceGroup --name env-abc123 --kind Standard +""" + +helps['confluent organization environment update'] = """ + type: command + short-summary: "Update confluent environment." + examples: + - name: Update environment + text: |- + az confluent organization environment update --organization-name myOrganization \ +--resource-group myResourceGroup --name env-abc123 --stream-governance-config package=ESSENTIALS +""" + +helps['confluent organization environment cluster create'] = """ + type: command + short-summary: "Create confluent clusters." 
+ examples: + - name: Create cluster + text: |- + az confluent organization environment cluster create --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --name lkc-abc123 --kind Kafka +""" + +helps['confluent organization environment cluster update'] = """ + type: command + short-summary: "Update confluent clusters." + examples: + - name: Update cluster + text: |- + az confluent organization environment cluster update --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --name lkc-abc123 \ +--spec availability=SINGLE_ZONE cloud=Azure +""" + +helps['confluent organization environment cluster connector create'] = """ + type: command + short-summary: "Create confluent connector." + examples: + - name: Create connector + text: |- + az confluent organization environment cluster connector create --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --cluster-id lkc-abc123 --name my-connector \ +--connector-basic-info connector-type=SINK connector-class=AZUREBLOBSINK +""" + +helps['confluent organization environment cluster connector update'] = """ + type: command + short-summary: "Update confluent connector." + examples: + - name: Update connector + text: |- + az confluent organization environment cluster connector update --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --cluster-id lkc-abc123 --name my-connector \ +--connector-basic-info connector-state=PAUSED +""" + +helps['confluent organization environment cluster topic create'] = """ + type: command + short-summary: "Create confluent topic." 
+ examples: + - name: Create topic + text: |- + az confluent organization environment cluster topic create --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --cluster-id lkc-abc123 --name my-topic \ +--partitions-count 6 +""" + +helps['confluent organization environment cluster topic update'] = """ + type: command + short-summary: "Update confluent topic." + examples: + - name: Update topic + text: |- + az confluent organization environment cluster topic update --organization-name myOrganization \ +--resource-group myResourceGroup --environment-id env-abc123 --cluster-id lkc-abc123 --name my-topic \ +--input-configs '[{name:retention.ms,value:604800000}]' +""" + +helps['confluent validation orgvalidate'] = """ + type: command + short-summary: "Organization Validate proxy resource." + examples: + - name: Validate organization + text: |- + az confluent validation orgvalidate --organization-name myOrganization \ +--resource-group myResourceGroup \ +--offer-detail id="confluent-cloud-azure-prod" plan-id="confluent-cloud-azure-payg-prod" plan-name="Confluent Cloud - Pay as you Go" publisher-id="confluentinc" term-unit="P1M" \ +--user-detail email-address="user@example.com" +""" + +helps['confluent validation orgvalidate-v2'] = """ + type: command + short-summary: "Validate Confluent organization resource." 
+ examples: + - name: Validate organization (v2) + text: |- + az confluent validation orgvalidate-v2 --organization-name myOrganization \ +--resource-group myResourceGroup \ +--offer-detail id="confluent-cloud-azure-prod" plan-id="confluent-cloud-azure-payg-prod" plan-name="Confluent Cloud - Pay as you Go" publisher-id="confluentinc" term-unit="P1M" \ +--user-detail email-address="user@example.com" +""" diff --git a/src/confluent/setup.py b/src/confluent/setup.py index d9eced51620..2838e8cdc2f 100644 --- a/src/confluent/setup.py +++ b/src/confluent/setup.py @@ -10,7 +10,7 @@ from setuptools import setup, find_packages # HISTORY.rst entry. -VERSION = '1.1.0' +VERSION = '1.2.0' # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers