diff --git a/.github/plugins/azure-sdk-python/skills/agent-framework-azure-ai-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/agent-framework-azure-ai-py/SKILL.md index 9fceffba..5b931049 100644 --- a/.github/plugins/azure-sdk-python/skills/agent-framework-azure-ai-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/agent-framework-azure-ai-py/SKILL.md @@ -334,6 +334,11 @@ if __name__ == "__main__": - Use `get_new_thread()` for multi-turn conversations - Prefer `HostedMCPTool` for service-managed MCP, `MCPStreamableHTTPTool` for client-managed +## Best Practices + +1. **This SDK is async-first** — use `async def` handlers and `async with` throughout. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. + ## Reference Files - [references/tools.md](references/tools.md): Detailed hosted tool patterns diff --git a/.github/plugins/azure-sdk-python/skills/agents-v2-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/agents-v2-py/SKILL.md index bc371241..900bb8ea 100644 --- a/.github/plugins/azure-sdk-python/skills/agents-v2-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/agents-v2-py/SKILL.md @@ -78,27 +78,26 @@ from azure.ai.projects.models import ( ### 2. 
Create Hosted Agent ```python -client = AIProjectClient( +with AIProjectClient( endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=DefaultAzureCredential() -) - -agent = client.agents.create_version( - agent_name="my-hosted-agent", - definition=ImageBasedHostedAgentDefinition( - container_protocol_versions=[ - ProtocolVersionRecord(protocol=AgentProtocol.RESPONSES, version="v1") - ], - cpu="1", - memory="2Gi", - image="myregistry.azurecr.io/my-agent:latest", - tools=[{"type": "code_interpreter"}], - environment_variables={ - "AZURE_AI_PROJECT_ENDPOINT": os.environ["AZURE_AI_PROJECT_ENDPOINT"], - "MODEL_NAME": "gpt-4o-mini" - } +) as client: + agent = client.agents.create_version( + agent_name="my-hosted-agent", + definition=ImageBasedHostedAgentDefinition( + container_protocol_versions=[ + ProtocolVersionRecord(protocol=AgentProtocol.RESPONSES, version="v1") + ], + cpu="1", + memory="2Gi", + image="myregistry.azurecr.io/my-agent:latest", + tools=[{"type": "code_interpreter"}], + environment_variables={ + "AZURE_AI_PROJECT_ENDPOINT": os.environ["AZURE_AI_PROJECT_ENDPOINT"], + "MODEL_NAME": "gpt-4o-mini" + } + ) ) -) print(f"Created agent: {agent.name} (version: {agent.version})") ``` @@ -235,34 +234,33 @@ from azure.ai.projects.models import ( def create_hosted_agent(): """Create a hosted agent with custom container image.""" - client = AIProjectClient( + with AIProjectClient( endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=DefaultAzureCredential() - ) - - agent = client.agents.create_version( - agent_name="data-processor-agent", - definition=ImageBasedHostedAgentDefinition( - container_protocol_versions=[ - ProtocolVersionRecord( - protocol=AgentProtocol.RESPONSES, - version="v1" - ) - ], - image="myregistry.azurecr.io/data-processor:v1.0", - cpu="2", - memory="4Gi", - tools=[ - {"type": "code_interpreter"}, - {"type": "file_search"} - ], - environment_variables={ - "AZURE_AI_PROJECT_ENDPOINT": os.environ["AZURE_AI_PROJECT_ENDPOINT"], - 
"MODEL_NAME": "gpt-4o-mini", - "MAX_RETRIES": "3" - } + ) as client: + agent = client.agents.create_version( + agent_name="data-processor-agent", + definition=ImageBasedHostedAgentDefinition( + container_protocol_versions=[ + ProtocolVersionRecord( + protocol=AgentProtocol.RESPONSES, + version="v1" + ) + ], + image="myregistry.azurecr.io/data-processor:v1.0", + cpu="2", + memory="4Gi", + tools=[ + {"type": "code_interpreter"}, + {"type": "file_search"} + ], + environment_variables={ + "AZURE_AI_PROJECT_ENDPOINT": os.environ["AZURE_AI_PROJECT_ENDPOINT"], + "MODEL_NAME": "gpt-4o-mini", + "MAX_RETRIES": "3" + } + ) ) - ) print(f"Created hosted agent: {agent.name}") print(f"Version: {agent.version}") @@ -322,11 +320,13 @@ async def create_hosted_agent_async(): ## Best Practices -1. **Version Your Images** - Use specific tags, not `latest` in production -2. **Minimal Resources** - Start with minimum CPU/memory, scale up as needed -3. **Environment Variables** - Use for all configuration, never hardcode -4. **Error Handling** - Wrap agent creation in try/except blocks -5. **Cleanup** - Delete unused agent versions to free resources +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Version Your Images** - Use specific tags, not `latest` in production +4. **Minimal Resources** - Start with minimum CPU/memory, scale up as needed +5. **Environment Variables** - Use for all configuration, never hardcode +6. **Error Handling** - Wrap agent creation in try/except blocks +7. 
**Cleanup** - Delete unused agent versions to free resources ## Reference Links diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-contentsafety-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-contentsafety-py/SKILL.md index 12f5e4b9..5be93049 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-contentsafety-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-contentsafety-py/SKILL.md @@ -67,18 +67,17 @@ from azure.ai.contentsafety import ContentSafetyClient from azure.ai.contentsafety.models import AnalyzeTextOptions, TextCategory from azure.core.credentials import AzureKeyCredential -client = ContentSafetyClient(endpoint, AzureKeyCredential(key)) - -request = AnalyzeTextOptions(text="Your text content to analyze") -response = client.analyze_text(request) - -# Check each category -for category in [TextCategory.HATE, TextCategory.SELF_HARM, - TextCategory.SEXUAL, TextCategory.VIOLENCE]: - result = next((r for r in response.categories_analysis - if r.category == category), None) - if result: - print(f"{category}: severity {result.severity}") +with ContentSafetyClient(endpoint, AzureKeyCredential(key)) as client: + request = AnalyzeTextOptions(text="Your text content to analyze") + response = client.analyze_text(request) + + # Check each category + for category in [TextCategory.HATE, TextCategory.SELF_HARM, + TextCategory.SEXUAL, TextCategory.VIOLENCE]: + result = next((r for r in response.categories_analysis + if r.category == category), None) + if result: + print(f"{category}: severity {result.severity}") ``` ## Analyze Image @@ -89,20 +88,19 @@ from azure.ai.contentsafety.models import AnalyzeImageOptions, ImageData from azure.core.credentials import AzureKeyCredential import base64 -client = ContentSafetyClient(endpoint, AzureKeyCredential(key)) +with ContentSafetyClient(endpoint, AzureKeyCredential(key)) as client: + # From file + with open("image.jpg", "rb") as f: + image_data = 
base64.b64encode(f.read()).decode("utf-8") -# From file -with open("image.jpg", "rb") as f: - image_data = base64.b64encode(f.read()).decode("utf-8") - -request = AnalyzeImageOptions( - image=ImageData(content=image_data) -) + request = AnalyzeImageOptions( + image=ImageData(content=image_data) + ) -response = client.analyze_image(request) + response = client.analyze_image(request) -for result in response.categories_analysis: - print(f"{result.category}: severity {result.severity}") + for result in response.categories_analysis: + print(f"{result.category}: severity {result.severity}") ``` ### Image from URL @@ -126,17 +124,16 @@ from azure.ai.contentsafety import BlocklistClient from azure.ai.contentsafety.models import TextBlocklist from azure.core.credentials import AzureKeyCredential -blocklist_client = BlocklistClient(endpoint, AzureKeyCredential(key)) - -blocklist = TextBlocklist( - blocklist_name="my-blocklist", - description="Custom terms to block" -) +with BlocklistClient(endpoint, AzureKeyCredential(key)) as blocklist_client: + blocklist = TextBlocklist( + blocklist_name="my-blocklist", + description="Custom terms to block" + ) -result = blocklist_client.create_or_update_text_blocklist( - blocklist_name="my-blocklist", - options=blocklist -) + result = blocklist_client.create_or_update_text_blocklist( + blocklist_name="my-blocklist", + options=blocklist + ) ``` ### Add Block Items @@ -215,10 +212,12 @@ request = AnalyzeTextOptions( ## Best Practices -1. **Use blocklists** for domain-specific terms -2. **Set severity thresholds** appropriate for your use case -3. **Handle multiple categories** — content can be harmful in multiple ways -4. **Use halt_on_blocklist_hit** for immediate rejection -5. **Log analysis results** for audit and improvement -6. **Consider 8-severity mode** for finer-grained control -7. **Pre-moderate AI outputs** before showing to users +1. 
**Pick sync OR async and stay consistent.** Do not mix `azure.ai.contentsafety` sync clients with `azure.ai.contentsafety.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with ContentSafetyClient(...) as client:` (sync) or `async with ContentSafetyClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use blocklists** for domain-specific terms +4. **Set severity thresholds** appropriate for your use case +5. **Handle multiple categories** — content can be harmful in multiple ways +6. **Use halt_on_blocklist_hit** for immediate rejection +7. **Log analysis results** for audit and improvement +8. **Consider 8-severity mode** for finer-grained control +9. **Pre-moderate AI outputs** before showing to users diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-contentunderstanding-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-contentunderstanding-py/SKILL.md index 3d5abfec..54c9cec5 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-contentunderstanding-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-contentunderstanding-py/SKILL.md @@ -70,22 +70,21 @@ from azure.ai.contentunderstanding.models import AnalyzeInput from azure.identity import DefaultAzureCredential endpoint = os.environ["CONTENTUNDERSTANDING_ENDPOINT"] -client = ContentUnderstandingClient( +with ContentUnderstandingClient( endpoint=endpoint, credential=DefaultAzureCredential() -) - -# Analyze document from URL -poller = client.begin_analyze( - analyzer_id="prebuilt-documentSearch", - inputs=[AnalyzeInput(url="https://example.com/document.pdf")] -) - -result = poller.result() - -# Access markdown content (contents is a list) -content = result.contents[0] -print(content.markdown) +) as client: + # Analyze document from URL 
+ poller = client.begin_analyze( + analyzer_id="prebuilt-documentSearch", + inputs=[AnalyzeInput(url="https://example.com/document.pdf")] + ) + + result = poller.result() + + # Access markdown content (contents is a list) + content = result.contents[0] + print(content.markdown) ``` ## Access Document Content Details @@ -226,19 +225,18 @@ from azure.identity.aio import DefaultAzureCredential async def analyze_document(): endpoint = os.environ["CONTENTUNDERSTANDING_ENDPOINT"] - credential = DefaultAzureCredential() - - async with ContentUnderstandingClient( - endpoint=endpoint, - credential=credential - ) as client: - poller = await client.begin_analyze( - analyzer_id="prebuilt-documentSearch", - inputs=[AnalyzeInput(url="https://example.com/doc.pdf")] - ) - result = await poller.result() - content = result.contents[0] - return content.markdown + async with DefaultAzureCredential() as credential: + async with ContentUnderstandingClient( + endpoint=endpoint, + credential=credential + ) as client: + poller = await client.begin_analyze( + analyzer_id="prebuilt-documentSearch", + inputs=[AnalyzeInput(url="https://example.com/doc.pdf")] + ) + result = await poller.result() + content = result.contents[0] + return content.markdown asyncio.run(analyze_document()) ``` @@ -273,10 +271,12 @@ from azure.ai.contentunderstanding.models import ( ## Best Practices -1. **Use `begin_analyze` with `AnalyzeInput`** — this is the correct method signature -2. **Access results via `result.contents[0]`** — results are returned as a list -3. **Use prebuilt analyzers** for common scenarios (document/image/audio/video search) -4. **Create custom analyzers** only for domain-specific field extraction -5. **Use async client** for high-throughput scenarios with `azure.identity.aio` credentials -6. **Handle long-running operations** — video/audio analysis can take minutes -7. **Use URL sources** when possible to avoid upload overhead +1. 
**Pick sync OR async and stay consistent.** Do not mix `azure.ai.contentunderstanding` sync clients with `azure.ai.contentunderstanding.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with ContentUnderstandingClient(...) as client:` (sync) or `async with ContentUnderstandingClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use `begin_analyze` with `AnalyzeInput`** — this is the correct method signature +4. **Access results via `result.contents[0]`** — results are returned as a list +5. **Use prebuilt analyzers** for common scenarios (document/image/audio/video search) +6. **Create custom analyzers** only for domain-specific field extraction +7. **Use async client** for high-throughput scenarios with `azure.identity.aio` credentials +8. **Handle long-running operations** — video/audio analysis can take minutes +9. **Use URL sources** when possible to avoid upload overhead diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-language-conversations-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-language-conversations-py/SKILL.md index 0edfbfb8..7a373846 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-language-conversations-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-language-conversations-py/SKILL.md @@ -20,8 +20,9 @@ When responding to requests about Azure AI Language Conversations: 4. Handle exceptions properly. ## Best Practices +- **Pick sync OR async and stay consistent.** Do not mix `azure.ai.language.conversations` sync clients with `azure.ai.language.conversations.aio` async clients in the same call path. Choose one mode per module. +- **Always use context managers for clients and async credentials.** Wrap every client in `with ConversationAnalysisClient(...) 
as client:` (sync) or `async with ConversationAnalysisClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. - Use environment variables for the endpoint, API key, project name, and deployment name. -- Always use context managers (`with client:`) to ensure proper resource handling. - Clearly map the `participantId` and `id` in the `conversationItem` payload. ## Examples diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-ml-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-ml-py/SKILL.md index d1e90a58..ff3de5a9 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-ml-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-ml-py/SKILL.md @@ -273,10 +273,12 @@ print(f"Default: {default_ds.name}") ## Best Practices -1. **Use versioning** for data, models, and environments -2. **Configure idle scale-down** to reduce compute costs -3. **Use environments** for reproducible training -4. **Stream job logs** to monitor progress -5. **Register models** after successful training jobs -6. **Use pipelines** for multi-step workflows -7. **Tag resources** for organization and cost tracking +1. **Pick sync OR async and stay consistent.** Do not mix `azure.ai.ml` sync clients with `azure.ai.ml` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with MLClient(...) as client:` (sync) or `async with MLClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use versioning** for data, models, and environments +4. **Configure idle scale-down** to reduce compute costs +5. **Use environments** for reproducible training +6. **Stream job logs** to monitor progress +7. 
**Register models** after successful training jobs +8. **Use pipelines** for multi-step workflows +9. **Tag resources** for organization and cost tracking diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-projects-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-projects-py/SKILL.md index 7d4f8d35..bacf2c7d 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-projects-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-projects-py/SKILL.md @@ -63,17 +63,16 @@ client = AIProjectClient( ```python from azure.ai.projects import AIProjectClient -client = AIProjectClient( +with AIProjectClient( endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), -) - -# Use Foundry-native operations -agent = client.agents.create_agent( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful.", -) +) as client: + # Use Foundry-native operations + agent = client.agents.create_agent( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful.", + ) ``` ### 2. OpenAI-Compatible Client @@ -270,11 +269,12 @@ agent = client.agents.create_agent( ## Best Practices -1. **Use context managers** for async client: `async with AIProjectClient(...) as client:` -2. **Clean up agents** when done: `client.agents.delete_agent(agent.id)` -3. **Use `create_and_process`** for simple runs, **streaming** for real-time UX -4. **Use versioned agents** for production deployments -5. **Prefer connections** for external service integration (AI Search, Bing, etc.) +1. **Pick sync OR async and stay consistent.** Do not mix `azure.ai.projects` sync clients with `azure.ai.projects.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with AIProjectClient(...) as client:` (sync) or `async with AIProjectClient(...) as client:` (async). 
For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Clean up agents** when done: `client.agents.delete_agent(agent.id)` +4. **Use `create_and_process`** for simple runs, **streaming** for real-time UX +5. **Use versioned agents** for production deployments +6. **Prefer connections** for external service integration (AI Search, Bing, etc.) ## SDK Comparison diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-textanalytics-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-textanalytics-py/SKILL.md index bbcabebb..b01f3cbd 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-textanalytics-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-textanalytics-py/SKILL.md @@ -199,12 +199,13 @@ from azure.ai.textanalytics.aio import TextAnalyticsClient from azure.identity.aio import DefaultAzureCredential async def analyze(): - async with TextAnalyticsClient( - endpoint=endpoint, - credential=DefaultAzureCredential() - ) as client: - result = await client.analyze_sentiment(documents) - # Process results... + async with DefaultAzureCredential() as credential: + async with TextAnalyticsClient( + endpoint=endpoint, + credential=credential + ) as client: + result = await client.analyze_sentiment(documents) + # Process results... ``` ## Client Types @@ -229,9 +230,10 @@ async def analyze(): ## Best Practices -1. **Use batch operations** for multiple documents (up to 10 per request) -2. **Enable opinion mining** for detailed aspect-based sentiment -3. **Use async client** for high-throughput scenarios -4. **Handle document errors** — results list may contain errors for some docs -5. **Specify language** when known to improve accuracy -6. **Use context manager** or close client explicitly +1. 
**Pick sync OR async and stay consistent.** Do not mix `azure.ai.textanalytics` sync clients with `azure.ai.textanalytics.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with TextAnalyticsClient(...) as client:` (sync) or `async with TextAnalyticsClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use batch operations** for multiple documents (up to 10 per request) +4. **Enable opinion mining** for detailed aspect-based sentiment +5. **Use async client** for high-throughput scenarios +6. **Handle document errors** — results list may contain errors for some docs +7. **Specify language** when known to improve accuracy diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-transcription-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-transcription-py/SKILL.md index 5fd05e61..a5155fe5 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-transcription-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-transcription-py/SKILL.md @@ -65,9 +65,11 @@ for event in stream: ## Best Practices -1. **Enable diarization** when multiple speakers are present -2. **Use batch transcription** for long files stored in blob storage -3. **Capture timestamps** for subtitle generation -4. **Specify language** to improve recognition accuracy -5. **Handle streaming backpressure** for real-time transcription -6. **Close transcription sessions** when complete +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). 
For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Enable diarization** when multiple speakers are present +4. **Use batch transcription** for long files stored in blob storage +5. **Capture timestamps** for subtitle generation +6. **Specify language** to improve recognition accuracy +7. **Handle streaming backpressure** for real-time transcription +8. **Close transcription sessions** when complete diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-translation-document-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-translation-document-py/SKILL.md index 711de49c..f3af1cca 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-translation-document-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-translation-document-py/SKILL.md @@ -116,16 +116,15 @@ poller = client.begin_translation( ```python from azure.ai.translation.document import SingleDocumentTranslationClient -single_client = SingleDocumentTranslationClient(endpoint, AzureKeyCredential(key)) - with open("document.docx", "rb") as f: document_content = f.read() -result = single_client.translate( - body=document_content, - target_language="es", - content_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document" -) +with SingleDocumentTranslationClient(endpoint, AzureKeyCredential(key)) as single_client: + result = single_client.translate( + body=document_content, + target_language="es", + content_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document" + ) # Save translated document with open("document_es.docx", "wb") as f: @@ -249,10 +248,12 @@ async def translate_documents(): ## Best Practices -1. **Use SAS tokens** with minimal required permissions -2. **Monitor long-running operations** with `poller.status()` -3. **Handle document-level errors** by iterating document statuses -4. 
**Use glossaries** for domain-specific terminology -5. **Separate target containers** for each language -6. **Use async client** for multiple concurrent jobs -7. **Check supported formats** before submitting documents +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use SAS tokens** with minimal required permissions +4. **Monitor long-running operations** with `poller.status()` +5. **Handle document-level errors** by iterating document statuses +6. **Use glossaries** for domain-specific terminology +7. **Separate target containers** for each language +8. **Use async client** for multiple concurrent jobs +9. **Check supported formats** before submitting documents diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-translation-text-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-translation-text-py/SKILL.md index 509856bd..7ae1c5f5 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-translation-text-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-translation-text-py/SKILL.md @@ -277,10 +277,12 @@ async def translate_text(): ## Best Practices -1. **Batch translations** — Send multiple texts in one request (up to 100) -2. **Specify source language** when known to improve accuracy -3. **Use async client** for high-throughput scenarios -4. **Cache language list** — Supported languages don't change frequently -5. **Handle profanity** appropriately for your application -6. **Use html text_type** when translating HTML content -7. 
**Include alignment** for applications needing word mapping +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Batch translations** — Send multiple texts in one request (up to 100) +4. **Specify source language** when known to improve accuracy +5. **Use async client** for high-throughput scenarios +6. **Cache language list** — Supported languages don't change frequently +7. **Handle profanity** appropriately for your application +8. **Use html text_type** when translating HTML content +9. **Include alignment** for applications needing word mapping diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-vision-imageanalysis-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-vision-imageanalysis-py/SKILL.md index 49ea8065..1d6c314c 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-vision-imageanalysis-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-vision-imageanalysis-py/SKILL.md @@ -216,15 +216,16 @@ from azure.ai.vision.imageanalysis.aio import ImageAnalysisClient from azure.identity.aio import DefaultAzureCredential async def analyze_image(): - async with ImageAnalysisClient( - endpoint=endpoint, - credential=DefaultAzureCredential() - ) as client: - result = await client.analyze_from_url( - image_url=image_url, - visual_features=[VisualFeatures.CAPTION] - ) - print(result.caption.text) + async with DefaultAzureCredential() as credential: + async with ImageAnalysisClient( + endpoint=endpoint, + credential=credential + ) as client: + result = await client.analyze_from_url( + 
image_url=image_url, + visual_features=[VisualFeatures.CAPTION] + ) + print(result.caption.text) ``` ## Visual Features @@ -263,10 +264,12 @@ except HttpResponseError as e: ## Best Practices -1. **Select only needed features** to optimize latency and cost -2. **Use async client** for high-throughput scenarios -3. **Handle HttpResponseError** for invalid images or auth issues -4. **Enable gender_neutral_caption** for inclusive descriptions -5. **Specify language** for localized captions -6. **Use smart_crops_aspect_ratios** matching your thumbnail requirements -7. **Cache results** when analyzing the same image multiple times +1. **Pick sync OR async and stay consistent.** Do not mix `azure.ai.vision.imageanalysis` sync clients with `azure.ai.vision.imageanalysis.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with ImageAnalysisClient(...) as client:` (sync) or `async with ImageAnalysisClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Select only needed features** to optimize latency and cost +4. **Use async client** for high-throughput scenarios +5. **Handle HttpResponseError** for invalid images or auth issues +6. **Enable gender_neutral_caption** for inclusive descriptions +7. **Specify language** for localized captions +8. **Use smart_crops_aspect_ratios** matching your thumbnail requirements +9. 
**Cache results** when analyzing the same image multiple times diff --git a/.github/plugins/azure-sdk-python/skills/azure-ai-voicelive-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-ai-voicelive-py/SKILL.md index 1a5d337f..39a0bed4 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-ai-voicelive-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-ai-voicelive-py/SKILL.md @@ -314,6 +314,11 @@ except ConnectionError as e: print(f"Connection error: {e}") ``` +## Best Practices + +1. **This SDK is async-only; use `azure.ai.voicelive.aio` throughout.** Do not try to pair it with sync clients from other Azure SDKs in the same call path — keep the whole request path async. +2. **Always use context managers for clients and async credentials.** Wrap every connection in `async with connect(...) as conn:`. For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. + ## References - **Detailed API Reference**: See [references/api-reference.md](references/api-reference.md) diff --git a/.github/plugins/azure-sdk-python/skills/azure-appconfiguration-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-appconfiguration-py/SKILL.md index c4fe1278..b8d050cc 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-appconfiguration-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-appconfiguration-py/SKILL.md @@ -225,17 +225,13 @@ from azure.appconfiguration.aio import AzureAppConfigurationClient from azure.identity.aio import DefaultAzureCredential async def main(): - credential = DefaultAzureCredential() - client = AzureAppConfigurationClient( - base_url=endpoint, - credential=credential - ) - - setting = await client.get_configuration_setting(key="app:message") - print(setting.value) - - await client.close() - await credential.close() + async with DefaultAzureCredential() as credential: + async with AzureAppConfigurationClient( + base_url=endpoint, + 
credential=credential + ) as client: + setting = await client.get_configuration_setting(key="app:message") + print(setting.value) ``` ## Client Operations @@ -252,10 +248,12 @@ async def main(): ## Best Practices -1. **Use labels** for environment separation (dev, staging, prod) -2. **Use key prefixes** for logical grouping (app:database:*, app:cache:*) -3. **Make production settings read-only** to prevent accidental changes -4. **Create snapshots** before deployments for rollback capability -5. **Use Entra ID** instead of connection strings in production -6. **Refresh settings periodically** in long-running applications -7. **Use feature flags** for gradual rollouts and A/B testing +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use labels** for environment separation (dev, staging, prod) +4. **Use key prefixes** for logical grouping (app:database:*, app:cache:*) +5. **Make production settings read-only** to prevent accidental changes +6. **Create snapshots** before deployments for rollback capability +7. **Use Entra ID** instead of connection strings in production +8. **Refresh settings periodically** in long-running applications +9. 
**Use feature flags** for gradual rollouts and A/B testing diff --git a/.github/plugins/azure-sdk-python/skills/azure-containerregistry-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-containerregistry-py/SKILL.md index 94f949ef..4df98cd4 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-containerregistry-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-containerregistry-py/SKILL.md @@ -63,10 +63,9 @@ client = ContainerRegistryClient( ## List Repositories ```python -client = ContainerRegistryClient(endpoint, DefaultAzureCredential()) - -for repository in client.list_repository_names(): - print(repository) +with ContainerRegistryClient(endpoint, DefaultAzureCredential()) as client: + for repository in client.list_repository_names(): + print(repository) ``` ## Repository Operations @@ -193,18 +192,17 @@ client.delete_tag("my-image", "old-tag") ```python from azure.containerregistry import ContainerRegistryClient -client = ContainerRegistryClient(endpoint, DefaultAzureCredential()) - -# Download manifest -manifest = client.download_manifest("my-image", "latest") -print(f"Media type: {manifest.media_type}") -print(f"Digest: {manifest.digest}") +with ContainerRegistryClient(endpoint, DefaultAzureCredential()) as client: + # Download manifest + manifest = client.download_manifest("my-image", "latest") + print(f"Media type: {manifest.media_type}") + print(f"Digest: {manifest.digest}") -# Download blob -blob = client.download_blob("my-image", "sha256:abc123...") -with open("layer.tar.gz", "wb") as f: - for chunk in blob: - f.write(chunk) + # Download blob + blob = client.download_blob("my-image", "sha256:abc123...") + with open("layer.tar.gz", "wb") as f: + for chunk in blob: + f.write(chunk) ``` ## Async Client @@ -214,14 +212,10 @@ from azure.containerregistry.aio import ContainerRegistryClient from azure.identity.aio import DefaultAzureCredential async def list_repos(): - credential = DefaultAzureCredential() - client = 
ContainerRegistryClient(endpoint, credential) - - async for repo in client.list_repository_names(): - print(repo) - - await client.close() - await credential.close() + async with DefaultAzureCredential() as credential: + async with ContainerRegistryClient(endpoint, credential) as client: + async for repo in client.list_repository_names(): + print(repo) ``` ## Clean Up Old Images @@ -255,10 +249,12 @@ for manifest in client.list_manifest_properties("my-image"): ## Best Practices -1. **Use Entra ID** for authentication in production -2. **Delete by digest** not tag to avoid orphaned images -3. **Lock production images** with can_delete=False -4. **Clean up untagged manifests** regularly -5. **Use async client** for high-throughput operations -6. **Order by last_updated** to find recent/old images -7. **Check manifest.tags** before deleting to avoid removing tagged images +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use Microsoft Entra ID** for authentication in production +4. **Delete by digest** not tag to avoid orphaned images +5. **Lock production images** with can_delete=False +6. **Clean up untagged manifests** regularly +7. **Use async client** for high-throughput operations +8. **Order by last_updated** to find recent/old images +9. 
**Check manifest.tags** before deleting to avoid removing tagged images diff --git a/.github/plugins/azure-sdk-python/skills/azure-cosmos-db-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-cosmos-db-py/SKILL.md index d381917f..50c2823c 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-cosmos-db-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-cosmos-db-py/SKILL.md @@ -205,6 +205,11 @@ async def test_get_project_by_id_returns_project(mock_cosmos_container): **Full testing guide**: See [references/testing.md](references/testing.md) +## Best Practices + +1. **This skill uses async throughout (`azure.cosmos.aio`); do not mix with the sync `azure.cosmos` client.** Keep the whole FastAPI request path async — don't pair sync Cosmos calls with async handlers. +2. **Always use context managers for clients and async credentials.** Wrap the client in `async with CosmosClient(...) as client:` (or manage its lifetime via FastAPI lifespan and close it explicitly). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. 
+ ## Reference Files | File | When to Read | diff --git a/.github/plugins/azure-sdk-python/skills/azure-cosmos-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-cosmos-py/SKILL.md index 392d0f3f..324848d0 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-cosmos-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-cosmos-py/SKILL.md @@ -233,24 +233,23 @@ from azure.cosmos.aio import CosmosClient from azure.identity.aio import DefaultAzureCredential async def cosmos_operations(): - credential = DefaultAzureCredential() - - async with CosmosClient(endpoint, credential=credential) as client: - database = client.get_database_client("mydb") - container = database.get_container_client("mycontainer") - - # Create - await container.create_item(body={"id": "1", "pk": "test"}) - - # Read - item = await container.read_item(item="1", partition_key="test") - - # Query - async for item in container.query_items( - query="SELECT * FROM c", - partition_key="test" - ): - print(item) + async with DefaultAzureCredential() as credential: + async with CosmosClient(endpoint, credential=credential) as client: + database = client.get_database_client("mydb") + container = database.get_container_client("mycontainer") + + # Create + await container.create_item(body={"id": "1", "pk": "test"}) + + # Read + item = await container.read_item(item="1", partition_key="test") + + # Query + async for item in container.query_items( + query="SELECT * FROM c", + partition_key="test" + ): + print(item) import asyncio asyncio.run(cosmos_operations()) @@ -274,13 +273,15 @@ except CosmosHttpResponseError as e: ## Best Practices -1. **Always specify partition key** for point reads and queries -2. **Use parameterized queries** to prevent injection and improve caching -3. **Avoid cross-partition queries** when possible -4. **Use `upsert_item`** for idempotent writes -5. **Use async client** for high-throughput scenarios -6. **Design partition key** for even data distribution -7. 
**Use `read_item`** instead of query for single document retrieval +1. **Pick sync OR async and stay consistent.** Do not mix `azure.cosmos` sync clients with `azure.cosmos.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with CosmosClient(...) as client:` (sync) or `async with CosmosClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Always specify partition key** for point reads and queries +4. **Use parameterized queries** to prevent injection and improve caching +5. **Avoid cross-partition queries** when possible +6. **Use `upsert_item`** for idempotent writes +7. **Use async client** for high-throughput scenarios +8. **Design partition key** for even data distribution +9. **Use `read_item`** instead of query for single document retrieval ## Reference Files diff --git a/.github/plugins/azure-sdk-python/skills/azure-data-tables-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-data-tables-py/SKILL.md index b35be83e..a67a0a0a 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-data-tables-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-data-tables-py/SKILL.md @@ -209,23 +209,22 @@ from azure.data.tables.aio import TableServiceClient, TableClient from azure.identity.aio import DefaultAzureCredential async def table_operations(): - credential = DefaultAzureCredential() - - async with TableClient( - endpoint="https://.table.core.windows.net", - table_name="mytable", - credential=credential - ) as client: - # Create - await client.create_entity(entity={ - "PartitionKey": "async", - "RowKey": "1", - "data": "test" - }) - - # Query - async for entity in client.query_entities("PartitionKey eq 'async'"): - print(entity) + async with DefaultAzureCredential() as credential: + async with TableClient( + 
endpoint="https://<account>.table.core.windows.net",
+            table_name="mytable",
+            credential=credential
+        ) as client:
+            # Create
+            await client.create_entity(entity={
+                "PartitionKey": "async",
+                "RowKey": "1",
+                "data": "test"
+            })
+
+            # Query
+            async for entity in client.query_entities("PartitionKey eq 'async'"):
+                print(entity)
 
 import asyncio
 asyncio.run(table_operations())
@@ -245,10 +244,12 @@ asyncio.run(table_operations())
 
 ## Best Practices
 
-1. **Design partition keys** for query patterns and even distribution
-2. **Query within partitions** whenever possible (cross-partition is expensive)
-3. **Use batch operations** for multiple entities in same partition
-4. **Use `upsert_entity`** for idempotent writes
-5. **Use parameterized queries** to prevent injection
-6. **Keep entities small** — max 1MB per entity
-7. **Use async client** for high-throughput scenarios
+1. **Pick sync OR async and stay consistent.** Do not mix `azure.data.tables` sync clients with `azure.data.tables.aio` async clients in the same call path. Choose one mode per module.
+2. **Always use context managers for clients and async credentials.** Wrap every client in `with TableClient(...) as client:` (sync) or `async with TableClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up.
+3. **Design partition keys** for query patterns and even distribution
+4. **Query within partitions** whenever possible (cross-partition is expensive)
+5. **Use batch operations** for multiple entities in same partition
+6. **Use `upsert_entity`** for idempotent writes
+7. **Use parameterized queries** to prevent injection
+8. **Keep entities small** — max 1MB per entity
+9. 
**Use async client** for high-throughput scenarios diff --git a/.github/plugins/azure-sdk-python/skills/azure-eventgrid-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-eventgrid-py/SKILL.md index f20ee22b..eb870904 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-eventgrid-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-eventgrid-py/SKILL.md @@ -59,26 +59,25 @@ client = EventGridPublisherClient(endpoint, credential) from azure.eventgrid import EventGridPublisherClient, CloudEvent from azure.identity import DefaultAzureCredential -client = EventGridPublisherClient(endpoint, DefaultAzureCredential()) - -# Single event -event = CloudEvent( - type="MyApp.Events.OrderCreated", - source="/myapp/orders", - data={"order_id": "12345", "amount": 99.99} -) -client.send(event) - -# Multiple events -events = [ - CloudEvent( +with EventGridPublisherClient(endpoint, DefaultAzureCredential()) as client: + # Single event + event = CloudEvent( type="MyApp.Events.OrderCreated", source="/myapp/orders", - data={"order_id": f"order-{i}"} + data={"order_id": "12345", "amount": 99.99} ) - for i in range(10) -] -client.send(events) + client.send(event) + + # Multiple events + events = [ + CloudEvent( + type="MyApp.Events.OrderCreated", + source="/myapp/orders", + data={"order_id": f"order-{i}"} + ) + for i in range(10) + ] + client.send(events) ``` ## Publish EventGridEvents @@ -153,17 +152,18 @@ asyncio.run(publish_events()) For Event Grid Namespaces (pull delivery): ```python -from azure.eventgrid.aio import EventGridPublisherClient +from azure.eventgrid import EventGridPublisherClient +from azure.identity import DefaultAzureCredential # Namespace endpoint (different from custom topic) namespace_endpoint = "https://..eventgrid.azure.net" topic_name = "my-topic" -async with EventGridPublisherClient( +with EventGridPublisherClient( endpoint=namespace_endpoint, credential=DefaultAzureCredential() ) as client: - await client.send( + client.send( event, 
namespace_topic=topic_name ) @@ -171,9 +171,11 @@ async with EventGridPublisherClient( ## Best Practices -1. **Use CloudEvents** for new applications (industry standard) -2. **Batch events** when publishing multiple events -3. **Include meaningful subjects** for filtering -4. **Use async client** for high-throughput scenarios -5. **Handle retries** — Event Grid has built-in retry -6. **Set appropriate event types** for routing and filtering +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use CloudEvents** for new applications (industry standard) +4. **Batch events** when publishing multiple events +5. **Include meaningful subjects** for filtering +6. **Use async client** for high-throughput scenarios +7. **Handle retries** — Event Grid has built-in retry +8. **Set appropriate event types** for routing and filtering diff --git a/.github/plugins/azure-sdk-python/skills/azure-eventhub-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-eventhub-py/SKILL.md index b784a8dd..6f7f5de4 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-eventhub-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-eventhub-py/SKILL.md @@ -232,13 +232,14 @@ with producer: ## Best Practices -1. **Use batches** for sending multiple events -2. **Use checkpoint store** in production for reliable processing -3. **Use async client** for high-throughput scenarios -4. **Use partition keys** for ordered delivery within a partition -5. **Handle batch size limits** — catch ValueError when batch is full -6. 
**Use context managers** (`with`/`async with`) for proper cleanup -7. **Set appropriate consumer groups** for different applications +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async) for proper cleanup. For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use batches** for sending multiple events +4. **Use checkpoint store** in production for reliable processing +5. **Use async client** for high-throughput scenarios +6. **Use partition keys** for ordered delivery within a partition +7. **Handle batch size limits** — catch ValueError when batch is full +8. **Set appropriate consumer groups** for different applications ## Reference Files diff --git a/.github/plugins/azure-sdk-python/skills/azure-identity-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-identity-py/SKILL.md index 5f0fb160..6d1328d0 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-identity-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-identity-py/SKILL.md @@ -362,14 +362,13 @@ credential = AzurePipelinesCredential( ```python from azure.identity import DefaultAzureCredential -credential = DefaultAzureCredential() - -# Get token for a specific scope -token = credential.get_token("https://management.azure.com/.default") -print(f"Token expires: {token.expires_on}") +with DefaultAzureCredential() as credential: + # Get token for a specific scope + token = credential.get_token("https://management.azure.com/.default") + print(f"Token expires: {token.expires_on}") -# For Azure Database for PostgreSQL -token = credential.get_token("https://ossrdbms-aad.database.windows.net/.default") + # For 
Azure Database for PostgreSQL + token = credential.get_token("https://ossrdbms-aad.database.windows.net/.default") ``` ## Async Credentials @@ -453,16 +452,16 @@ credential = ClientSecretCredential( from azure.identity import DefaultAzureCredential, CredentialUnavailableError from azure.core.exceptions import ClientAuthenticationError -credential = DefaultAzureCredential() -try: - token = credential.get_token("https://management.azure.com/.default") -except CredentialUnavailableError: - # No credential in the chain could attempt authentication - pass -except ClientAuthenticationError as e: - # Authentication was attempted but failed - # e.message contains details from each credential in the chain - pass +with DefaultAzureCredential() as credential: + try: + token = credential.get_token("https://management.azure.com/.default") + except CredentialUnavailableError: + # No credential in the chain could attempt authentication + pass + except ClientAuthenticationError as e: + # Authentication was attempted but failed + # e.message contains details from each credential in the chain + pass ``` ## Logging @@ -499,17 +498,18 @@ AZURE_LOG_LEVEL=debug ## Best Practices -1. **Use `DefaultAzureCredential`** for code that runs locally and in Azure -2. **Never hardcode credentials** — use environment variables or managed identity -3. **Prefer managed identity** in production Azure deployments -4. **Use `get_bearer_token_provider`** for non-Azure-SDK clients (OpenAI, REST APIs) -5. **Use `ChainedTokenCredential`** when you need a custom credential order -6. **Close async credentials** — use `async with credential:` context manager -7. **Set `AZURE_CLIENT_ID`** for user-assigned managed identities (object ID and resource ID are also valid identifiers) -8. **Exclude unused credentials** to speed up `DefaultAzureCredential` authentication -9. **Use `CertificateCredential`** (not `ClientCertificateCredential` — that name doesn't exist) -10. 
**Enable `cache_persistence_options`** for long-running services to reduce token requests -11. **Reuse credential instances** — same credential can be shared across multiple clients +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Use credentials as context managers** (`with DefaultAzureCredential() as credential:`) when they own token caches / HTTP transports you want cleaned up; for async, use `async with` on credentials from `azure.identity.aio`. +3. **Use `DefaultAzureCredential`** for code that runs locally. Use a specific token credential for code that runs in Azure. +4. **Never hardcode credentials** — use environment variables or managed identity +5. **Prefer managed identity** in production Azure deployments +6. **Use `get_bearer_token_provider`** for non-Azure-SDK clients (OpenAI, REST APIs) +7. **Use `ChainedTokenCredential`** when you need a custom credential order +8. **Set `AZURE_CLIENT_ID`** for user-assigned managed identities (object ID and resource ID are also valid identifiers) +9. **Exclude unused credentials** to speed up `DefaultAzureCredential` authentication +10. **Use `CertificateCredential`** (not `ClientCertificateCredential` — that name doesn't exist) +11. **Enable `cache_persistence_options`** for long-running services to reduce token requests +12. 
**Reuse credential instances** — same credential can be shared across multiple clients ## Reference Links diff --git a/.github/plugins/azure-sdk-python/skills/azure-keyvault-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-keyvault-py/SKILL.md index 19c31e58..83840332 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-keyvault-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-keyvault-py/SKILL.md @@ -132,33 +132,32 @@ deleted_key = poller.result() from azure.keyvault.keys.crypto import CryptographyClient, EncryptionAlgorithm # Get crypto client for a specific key -crypto_client = CryptographyClient(key, credential=credential) +# crypto_client = CryptographyClient(key, credential=credential) # Or from key ID -crypto_client = CryptographyClient( +with CryptographyClient( "https://.vault.azure.net/keys//", credential=credential -) - -# Encrypt -plaintext = b"Hello, Key Vault!" -result = crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep, plaintext) -ciphertext = result.ciphertext - -# Decrypt -result = crypto_client.decrypt(EncryptionAlgorithm.rsa_oaep, ciphertext) -decrypted = result.plaintext - -# Sign -from azure.keyvault.keys.crypto import SignatureAlgorithm -import hashlib - -digest = hashlib.sha256(b"data to sign").digest() -result = crypto_client.sign(SignatureAlgorithm.rs256, digest) -signature = result.signature - -# Verify -result = crypto_client.verify(SignatureAlgorithm.rs256, digest, signature) -print(f"Valid: {result.is_valid}") +) as crypto_client: + # Encrypt + plaintext = b"Hello, Key Vault!" 
+ result = crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep, plaintext) + ciphertext = result.ciphertext + + # Decrypt + result = crypto_client.decrypt(EncryptionAlgorithm.rsa_oaep, ciphertext) + decrypted = result.plaintext + + # Sign + from azure.keyvault.keys.crypto import SignatureAlgorithm + import hashlib + + digest = hashlib.sha256(b"data to sign").digest() + result = crypto_client.sign(SignatureAlgorithm.rs256, digest) + signature = result.signature + + # Verify + result = crypto_client.verify(SignatureAlgorithm.rs256, digest, signature) + print(f"Valid: {result.is_valid}") ``` ## Certificates @@ -189,9 +188,9 @@ print(f"Thumbprint: {certificate.properties.x509_thumbprint.hex()}") # Get certificate with private key (as secret) from azure.keyvault.secrets import SecretClient -secret_client = SecretClient(vault_url=vault_url, credential=credential) -cert_secret = secret_client.get_secret("my-cert") -# cert_secret.value contains PEM or PKCS12 +with SecretClient(vault_url=vault_url, credential=credential) as secret_client: + cert_secret = secret_client.get_secret("my-cert") + # cert_secret.value contains PEM or PKCS12 # List certificates for cert in client.list_properties_of_certificates(): @@ -218,12 +217,10 @@ from azure.identity.aio import DefaultAzureCredential from azure.keyvault.secrets.aio import SecretClient async def get_secret(): - credential = DefaultAzureCredential() - client = SecretClient(vault_url=vault_url, credential=credential) - - async with client: - secret = await client.get_secret("my-secret") - print(secret.value) + async with DefaultAzureCredential() as credential: + async with SecretClient(vault_url=vault_url, credential=credential) as client: + secret = await client.get_secret("my-secret") + print(secret.value) import asyncio asyncio.run(get_secret()) @@ -246,11 +243,13 @@ except HttpResponseError as e: ## Best Practices -1. **Use DefaultAzureCredential** for authentication -2. 
**Use managed identity** in Azure-hosted applications -3. **Enable soft-delete** for recovery (enabled by default) -4. **Use RBAC** over access policies for fine-grained control -5. **Rotate secrets** regularly using versioning -6. **Use Key Vault references** in App Service/Functions config -7. **Cache secrets** appropriately to reduce API calls -8. **Use async clients** for high-throughput scenarios +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use `DefaultAzureCredential`** for code that runs locally. Use a specific token credential for code that runs in Azure. +4. **Use managed identity** in Azure-hosted applications +5. **Enable soft-delete** for recovery (enabled by default) +6. **Use RBAC** over access policies for fine-grained control +7. **Rotate secrets** regularly using versioning +8. **Use Key Vault references** in App Service/Functions config +9. **Cache secrets** appropriately to reduce API calls +10. 
**Use async clients** for high-throughput scenarios
diff --git a/.github/plugins/azure-sdk-python/skills/azure-messaging-webpubsubservice-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-messaging-webpubsubservice-py/SKILL.md
index 1b90f274..834cc2da 100644
--- a/.github/plugins/azure-sdk-python/skills/azure-messaging-webpubsubservice-py/SKILL.md
+++ b/.github/plugins/azure-sdk-python/skills/azure-messaging-webpubsubservice-py/SKILL.md
@@ -218,17 +218,13 @@ from azure.messaging.webpubsubservice.aio import WebPubSubServiceClient
 from azure.identity.aio import DefaultAzureCredential
 
 async def broadcast():
-    credential = DefaultAzureCredential()
-    client = WebPubSubServiceClient(
-        endpoint="https://.webpubsub.azure.com",
-        hub="my-hub",
-        credential=credential
-    )
-
-    await client.send_to_all("Hello async!", content_type="text/plain")
-
-    await client.close()
-    await credential.close()
+    async with DefaultAzureCredential() as credential:
+        async with WebPubSubServiceClient(
+            endpoint="https://<resource>.webpubsub.azure.com",
+            hub="my-hub",
+            credential=credential
+        ) as client:
+            await client.send_to_all("Hello async!", content_type="text/plain")
 ```
 
 ## Client Operations
@@ -247,10 +243,12 @@ async def broadcast():
 
 ## Best Practices
 
-1. **Use roles** to limit client permissions
-2. **Use groups** for targeted messaging
-3. **Generate short-lived tokens** for security
-4. **Use user IDs** to send to users across connections
-5. **Handle reconnection** in client applications
-6. **Use JSON** content type for structured data
-7. **Close connections** gracefully with reasons
+1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module.
+2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). 
For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use roles** to limit client permissions +4. **Use groups** for targeted messaging +5. **Generate short-lived tokens** for security +6. **Use user IDs** to send to users across connections +7. **Handle reconnection** in client applications +8. **Use JSON** content type for structured data +9. **Close connections** gracefully with reasons diff --git a/.github/plugins/azure-sdk-python/skills/azure-mgmt-apicenter-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-mgmt-apicenter-py/SKILL.md index ec284223..fb8237ef 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-mgmt-apicenter-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-mgmt-apicenter-py/SKILL.md @@ -245,9 +245,11 @@ metadata = client.metadata_schemas.create_or_update( ## Best Practices -1. **Use workspaces** to organize APIs by team or domain -2. **Define metadata schemas** for consistent governance -3. **Track deployments** to understand where APIs are running -4. **Import specifications** to enable API analysis and linting -5. **Use lifecycle stages** to track API maturity -6. **Add contacts** for API ownership and support +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use workspaces** to organize APIs by team or domain +4. **Define metadata schemas** for consistent governance +5. **Track deployments** to understand where APIs are running +6. 
**Import specifications** to enable API analysis and linting +7. **Use lifecycle stages** to track API maturity +8. **Add contacts** for API ownership and support diff --git a/.github/plugins/azure-sdk-python/skills/azure-mgmt-apimanagement-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-mgmt-apimanagement-py/SKILL.md index 1b3e8177..b58134db 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-mgmt-apimanagement-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-mgmt-apimanagement-py/SKILL.md @@ -281,9 +281,11 @@ user = client.user.create_or_update( ## Best Practices -1. **Use named values** for secrets and configuration -2. **Apply policies** at appropriate scopes (global, product, API, operation) -3. **Use products** to bundle APIs and manage access -4. **Enable Application Insights** for monitoring -5. **Use backends** to abstract backend services -6. **Version your APIs** using APIM's versioning features +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use named values** for secrets and configuration +4. **Apply policies** at appropriate scopes (global, product, API, operation) +5. **Use products** to bundle APIs and manage access +6. **Enable Application Insights** for monitoring +7. **Use backends** to abstract backend services +8. 
**Version your APIs** using APIM's versioning features diff --git a/.github/plugins/azure-sdk-python/skills/azure-mgmt-botservice-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-mgmt-botservice-py/SKILL.md index 47f2a021..99c3cb89 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-mgmt-botservice-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-mgmt-botservice-py/SKILL.md @@ -55,31 +55,30 @@ from azure.mgmt.botservice.models import Bot, BotProperties, Sku from azure.identity import DefaultAzureCredential import os -credential = DefaultAzureCredential() -client = AzureBotService( - credential=credential, - subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"] -) - resource_group = os.environ["AZURE_RESOURCE_GROUP"] bot_name = "my-chat-bot" -bot = client.bots.create( - resource_group_name=resource_group, - resource_name=bot_name, - parameters=Bot( - location="global", - sku=Sku(name="F0"), # Free tier - kind="azurebot", - properties=BotProperties( - display_name="My Chat Bot", - description="A conversational AI bot", - endpoint="https://my-bot-app.azurewebsites.net/api/messages", - msa_app_id="", - msa_app_type="MultiTenant" +credential = DefaultAzureCredential() +with AzureBotService( + credential=credential, + subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"] +) as client: + bot = client.bots.create( + resource_group_name=resource_group, + resource_name=bot_name, + parameters=Bot( + location="global", + sku=Sku(name="F0"), # Free tier + kind="azurebot", + properties=BotProperties( + display_name="My Chat Bot", + description="A conversational AI bot", + endpoint="https://my-bot-app.azurewebsites.net/api/messages", + msa_app_id="", + msa_app_type="MultiTenant" + ) ) ) -) print(f"Bot created: {bot.name}") ``` @@ -321,10 +320,12 @@ for conn in connections: ## Best Practices -1. **Use DefaultAzureCredential** for authentication -2. **Start with F0 SKU** for development, upgrade to S1 for production -3. 
**Store MSA App ID/Secret securely** — use Key Vault -4. **Enable only needed channels** — reduces attack surface -5. **Rotate Direct Line keys** periodically -6. **Use managed identity** when possible for bot connections -7. **Configure proper CORS** for Web Chat channel +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use `DefaultAzureCredential`** for code that runs locally. Use a specific token credential for code that runs in Azure. +4. **Start with F0 SKU** for development, upgrade to S1 for production +5. **Store MSA App ID/Secret securely** — use Key Vault +6. **Enable only needed channels** — reduces attack surface +7. **Rotate Direct Line keys** periodically +8. **Use managed identity** when possible for bot connections +9. 
**Configure proper CORS** for Web Chat channel diff --git a/.github/plugins/azure-sdk-python/skills/azure-mgmt-fabric-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-mgmt-fabric-py/SKILL.md index 097823aa..b0e13515 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-mgmt-fabric-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-mgmt-fabric-py/SKILL.md @@ -55,31 +55,30 @@ from azure.mgmt.fabric.models import FabricCapacity, FabricCapacityProperties, C from azure.identity import DefaultAzureCredential import os -credential = DefaultAzureCredential() -client = FabricMgmtClient( - credential=credential, - subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"] -) - resource_group = os.environ["AZURE_RESOURCE_GROUP"] capacity_name = "myfabriccapacity" -capacity = client.fabric_capacities.begin_create_or_update( - resource_group_name=resource_group, - capacity_name=capacity_name, - resource=FabricCapacity( - location="eastus", - sku=CapacitySku( - name="F2", # Fabric SKU - tier="Fabric" - ), - properties=FabricCapacityProperties( - administration=FabricCapacityAdministration( - members=["user@contoso.com"] +credential = DefaultAzureCredential() +with FabricMgmtClient( + credential=credential, + subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"] +) as client: + capacity = client.fabric_capacities.begin_create_or_update( + resource_group_name=resource_group, + capacity_name=capacity_name, + resource=FabricCapacity( + location="eastus", + sku=CapacitySku( + name="F2", # Fabric SKU + tier="Fabric" + ), + properties=FabricCapacityProperties( + administration=FabricCapacityAdministration( + members=["user@contoso.com"] + ) ) ) - ) -).result() + ).result() print(f"Capacity created: {capacity.name}") ``` @@ -258,11 +257,13 @@ capacity = poller.result() ## Best Practices -1. **Use DefaultAzureCredential** for authentication -2. **Suspend unused capacities** to reduce costs -3. **Start with smaller SKUs** and scale up as needed -4. 
**Use tags** for cost tracking and organization -5. **Check name availability** before creating capacities -6. **Handle LRO properly** — don't assume immediate completion -7. **Set up capacity admins** — specify users who can manage workspaces -8. **Monitor capacity usage** via Azure Monitor metrics +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use `DefaultAzureCredential`** for code that runs locally. Use a specific token credential for code that runs in Azure. +4. **Suspend unused capacities** to reduce costs +5. **Start with smaller SKUs** and scale up as needed +6. **Use tags** for cost tracking and organization +7. **Check name availability** before creating capacities +8. **Handle LRO properly** — don't assume immediate completion +9. **Set up capacity admins** — specify users who can manage workspaces +10. 
**Monitor capacity usage** via Azure Monitor metrics diff --git a/.github/plugins/azure-sdk-python/skills/azure-monitor-ingestion-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-monitor-ingestion-py/SKILL.md index 09a4c81f..513a0254 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-monitor-ingestion-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-monitor-ingestion-py/SKILL.md @@ -70,11 +70,6 @@ from azure.monitor.ingestion import LogsIngestionClient from azure.identity import DefaultAzureCredential import os -client = LogsIngestionClient( - endpoint=os.environ["AZURE_DCE_ENDPOINT"], - credential=DefaultAzureCredential() -) - rule_id = os.environ["AZURE_DCR_RULE_ID"] stream_name = os.environ["AZURE_DCR_STREAM_NAME"] @@ -84,7 +79,11 @@ logs = [ {"TimeGenerated": "2024-01-15T10:02:00Z", "Computer": "server2", "Message": "Connection established"} ] -client.upload(rule_id=rule_id, stream_name=stream_name, logs=logs) +with LogsIngestionClient( + endpoint=os.environ["AZURE_DCE_ENDPOINT"], + credential=DefaultAzureCredential() +) as client: + client.upload(rule_id=rule_id, stream_name=stream_name, logs=logs) ``` ## Upload from JSON File @@ -205,11 +204,12 @@ Stream names follow patterns: ## Best Practices -1. **Use DefaultAzureCredential** for authentication -2. **Handle errors gracefully** — use `on_error` callback for partial failures -3. **Include TimeGenerated** — Required field for all logs -4. **Match DCR schema** — Log fields must match DCR column definitions -5. **Use async client** for high-throughput scenarios -6. **Batch uploads** — SDK handles batching, but send reasonable chunks -7. **Monitor ingestion** — Check Log Analytics for ingestion status -8. **Use context manager** — Ensures proper client cleanup +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. 
**Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async) to ensure proper cleanup. For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use `DefaultAzureCredential`** for code that runs locally. Use a specific token credential for code that runs in Azure. +4. **Handle errors gracefully** — use `on_error` callback for partial failures +5. **Include TimeGenerated** — Required field for all logs +6. **Match DCR schema** — Log fields must match DCR column definitions +7. **Use async client** for high-throughput scenarios +8. **Batch uploads** — SDK handles batching, but send reasonable chunks +9. **Monitor ingestion** — Check Log Analytics for ingestion status diff --git a/.github/plugins/azure-sdk-python/skills/azure-monitor-opentelemetry-exporter-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-monitor-opentelemetry-exporter-py/SKILL.md index a46f7d2a..62a3b66b 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-monitor-opentelemetry-exporter-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-monitor-opentelemetry-exporter-py/SKILL.md @@ -210,9 +210,11 @@ exporter = AzureMonitorTraceExporter( ## Best Practices -1. **Use BatchSpanProcessor** for production (not SimpleSpanProcessor) -2. **Use ApplicationInsightsSampler** for consistent sampling across services -3. **Enable offline storage** for reliability in production -4. **Use AAD authentication** instead of instrumentation keys -5. **Set export intervals** appropriate for your workload -6. **Use the distro** (`azure-monitor-opentelemetry`) unless you need custom pipelines +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. 
**Flush and shut down providers at process exit.** Call the shutdown/flush APIs (e.g. `tracer_provider.shutdown()`, `meter_provider.shutdown()`, `logger_provider.shutdown()`) at process exit to flush telemetry before the process terminates. +3. **Use BatchSpanProcessor** for production (not SimpleSpanProcessor) +4. **Use ApplicationInsightsSampler** for consistent sampling across services +5. **Enable offline storage** for reliability in production +6. **Use Microsoft Entra authentication** instead of instrumentation keys +7. **Set export intervals** appropriate for your workload +8. **Use the distro** (`azure-monitor-opentelemetry`) unless you need custom pipelines diff --git a/.github/plugins/azure-sdk-python/skills/azure-monitor-opentelemetry-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-monitor-opentelemetry-py/SKILL.md index f1822588..422d449b 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-monitor-opentelemetry-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-monitor-opentelemetry-py/SKILL.md @@ -226,10 +226,12 @@ configure_azure_monitor( ## Best Practices -1. **Call configure_azure_monitor() early** — Before importing instrumented libraries -2. **Use environment variables** for connection string in production -3. **Set cloud role name** for multi-service applications -4. **Enable sampling** in high-traffic applications -5. **Use structured logging** for better log analytics queries -6. **Add custom attributes** to spans for better debugging -7. **Use AAD authentication** for production workloads +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Flush and shut down providers at process exit.** Call the shutdown/flush APIs (e.g. `tracer_provider.shutdown()`, `meter_provider.shutdown()`, `logger_provider.shutdown()`) at process exit to flush telemetry before the process terminates. +3. 
**Call configure_azure_monitor() early** — Before importing instrumented libraries +4. **Use environment variables** for connection string in production +5. **Set cloud role name** for multi-service applications +6. **Enable sampling** in high-traffic applications +7. **Use structured logging** for better log analytics queries +8. **Add custom attributes** to spans for better debugging +9. **Use Microsoft Entra authentication** for production workloads diff --git a/.github/plugins/azure-sdk-python/skills/azure-monitor-query-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-monitor-query-py/SKILL.md index 8231f1be..fcc05c0c 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-monitor-query-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-monitor-query-py/SKILL.md @@ -51,8 +51,6 @@ credential = DefaultAzureCredential(require_envvar=True) from azure.monitor.query import LogsQueryClient from datetime import timedelta -client = LogsQueryClient(credential) - query = """ AppRequests | where TimeGenerated > ago(1h) @@ -60,15 +58,16 @@ AppRequests | order by TimeGenerated desc """ -response = client.query_workspace( - workspace_id=os.environ["AZURE_LOG_ANALYTICS_WORKSPACE_ID"], - query=query, - timespan=timedelta(hours=1) -) +with LogsQueryClient(credential) as client: + response = client.query_workspace( + workspace_id=os.environ["AZURE_LOG_ANALYTICS_WORKSPACE_ID"], + query=query, + timespan=timedelta(hours=1) + ) -for table in response.tables: - for row in table.rows: - print(row) + for table in response.tables: + for row in table.rows: + print(row) ``` ### Query with Time Range @@ -137,20 +136,19 @@ elif response.status == LogsQueryStatus.FAILURE: from azure.monitor.query import MetricsQueryClient from datetime import timedelta -metrics_client = MetricsQueryClient(credential) - -response = metrics_client.query_resource( - resource_uri=os.environ["AZURE_METRICS_RESOURCE_URI"], - metric_names=["Percentage CPU", "Network In Total"], - 
timespan=timedelta(hours=1), - granularity=timedelta(minutes=5) -) +with MetricsQueryClient(credential) as metrics_client: + response = metrics_client.query_resource( + resource_uri=os.environ["AZURE_METRICS_RESOURCE_URI"], + metric_names=["Percentage CPU", "Network In Total"], + timespan=timedelta(hours=1), + granularity=timedelta(minutes=5) + ) -for metric in response.metrics: - print(f"{metric.name}:") - for time_series in metric.timeseries: - for data in time_series.data: - print(f" {data.timestamp}: {data.average}") + for metric in response.metrics: + print(f"{metric.name}:") + for time_series in metric.timeseries: + for data in time_series.data: + print(f" {data.timestamp}: {data.average}") ``` ### Aggregations @@ -205,18 +203,14 @@ from azure.monitor.query.aio import LogsQueryClient, MetricsQueryClient from azure.identity.aio import DefaultAzureCredential async def query_logs(): - credential = DefaultAzureCredential() - client = LogsQueryClient(credential) - - response = await client.query_workspace( - workspace_id=workspace_id, - query="AppRequests | take 10", - timespan=timedelta(hours=1) - ) - - await client.close() - await credential.close() - return response + async with DefaultAzureCredential() as credential: + async with LogsQueryClient(credential) as client: + response = await client.query_workspace( + workspace_id=workspace_id, + query="AppRequests | take 10", + timespan=timedelta(hours=1) + ) + return response ``` ## Common Kusto Queries @@ -252,10 +246,12 @@ AppExceptions ## Best Practices -1. **Use timedelta** for relative time ranges -2. **Handle partial results** for large queries -3. **Use batch queries** when running multiple queries -4. **Set appropriate granularity** for metrics to reduce data points -5. **Convert to DataFrame** for easier data analysis -6. **Use aggregations** to summarize metric data -7. **Filter by dimensions** to narrow metric results +1. 
**Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use timedelta** for relative time ranges +4. **Handle partial results** for large queries +5. **Use batch queries** when running multiple queries +6. **Set appropriate granularity** for metrics to reduce data points +7. **Convert to DataFrame** for easier data analysis +8. **Use aggregations** to summarize metric data +9. **Filter by dimensions** to narrow metric results diff --git a/.github/plugins/azure-sdk-python/skills/azure-search-documents-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-search-documents-py/SKILL.md index 25148e0e..d3ecda31 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-search-documents-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-search-documents-py/SKILL.md @@ -112,7 +112,8 @@ index = SearchIndex( vector_search=vector_search ) -index_client.create_or_update_index(index) +with index_client: + index_client.create_or_update_index(index) ``` ## Upload Documents @@ -120,8 +121,6 @@ index_client.create_or_update_index(index) ```python from azure.search.documents import SearchClient -client = SearchClient(endpoint, "my-index", AzureKeyCredential(key)) - documents = [ { "id": "1", @@ -131,8 +130,9 @@ documents = [ } ] -result = client.upload_documents(documents) -print(f"Uploaded {len(result)} documents") +with SearchClient(endpoint, "my-index", AzureKeyCredential(key)) as client: + result = client.upload_documents(documents) + print(f"Uploaded {len(result)} documents") ``` ## Keyword Search @@ -268,46 +268,49 @@ from 
azure.search.documents.indexes.models import ( indexer_client = SearchIndexerClient(endpoint, AzureKeyCredential(key)) -# Create data source -data_source = SearchIndexerDataSourceConnection( - name="my-datasource", - type="azureblob", - connection_string=connection_string, - container={"name": "documents"} -) -indexer_client.create_or_update_data_source_connection(data_source) - -# Create skillset -skillset = SearchIndexerSkillset( - name="my-skillset", - skills=[ - EntityRecognitionSkill( - inputs=[InputFieldMappingEntry(name="text", source="/document/content")], - outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")] - ) - ] -) -indexer_client.create_or_update_skillset(skillset) - -# Create indexer -indexer = SearchIndexer( - name="my-indexer", - data_source_name="my-datasource", - target_index_name="my-index", - skillset_name="my-skillset" -) -indexer_client.create_or_update_indexer(indexer) +with indexer_client: + # Create data source + data_source = SearchIndexerDataSourceConnection( + name="my-datasource", + type="azureblob", + connection_string=connection_string, + container={"name": "documents"} + ) + indexer_client.create_or_update_data_source_connection(data_source) + + # Create skillset + skillset = SearchIndexerSkillset( + name="my-skillset", + skills=[ + EntityRecognitionSkill( + inputs=[InputFieldMappingEntry(name="text", source="/document/content")], + outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")] + ) + ] + ) + indexer_client.create_or_update_skillset(skillset) + + # Create indexer + indexer = SearchIndexer( + name="my-indexer", + data_source_name="my-datasource", + target_index_name="my-index", + skillset_name="my-skillset" + ) + indexer_client.create_or_update_indexer(indexer) ``` ## Best Practices -1. **Use hybrid search** for best relevance combining vector and keyword -2. **Enable semantic ranking** for natural language queries -3. 
**Index in batches** of 100-1000 documents for efficiency -4. **Use filters** to narrow results before ranking -5. **Configure vector dimensions** to match your embedding model -6. **Use HNSW algorithm** for large-scale vector search -7. **Create suggesters** at index creation time (cannot add later) +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use hybrid search** for best relevance combining vector and keyword +4. **Enable semantic ranking** for natural language queries +5. **Index in batches** of 100-1000 documents for efficiency +6. **Use filters** to narrow results before ranking +7. **Configure vector dimensions** to match your embedding model +8. **Use HNSW algorithm** for large-scale vector search +9. 
**Create suggesters** at index creation time (cannot add later) ## Reference Files @@ -422,7 +425,8 @@ index = SearchIndex( ) index_client = SearchIndexClient(endpoint, credential) -index_client.create_or_update_index(index) +with index_client: + index_client.create_or_update_index(index) ``` ## Document Operations @@ -435,11 +439,11 @@ with SearchIndexingBufferedSender(endpoint, index_name, credential) as sender: sender.upload_documents(documents) # Direct operations via SearchClient -search_client = SearchClient(endpoint, index_name, credential) -search_client.upload_documents(documents) # Add new -search_client.merge_documents(documents) # Update existing -search_client.merge_or_upload_documents(documents) # Upsert -search_client.delete_documents(documents) # Remove +with SearchClient(endpoint, index_name, credential) as search_client: + search_client.upload_documents(documents) # Add new + search_client.merge_documents(documents) # Update existing + search_client.merge_or_upload_documents(documents) # Upsert + search_client.delete_documents(documents) # Remove ``` ## Search Patterns @@ -500,7 +504,7 @@ async with SearchClient(endpoint, index_name, credential) as client: ## Best Practices 1. **Use environment variables** for endpoints, keys, and deployment names -2. **Prefer `DefaultAzureCredential`** over API keys for production +2. **Use `DefaultAzureCredential`** for code that runs locally (instead of API keys). Use a specific token credential for code that runs in Azure. 3. **Use `SearchIndexingBufferedSender`** for batch uploads (handles batching/retries) 4. **Always define semantic configuration** for agentic retrieval indexes 5. 
**Use `create_or_update_index`** for idempotent index creation diff --git a/.github/plugins/azure-sdk-python/skills/azure-servicebus-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-servicebus-py/SKILL.md index 194b28c9..ed373d57 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-servicebus-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-servicebus-py/SKILL.md @@ -259,13 +259,14 @@ with ServiceBusClient( ## Best Practices -1. **Use async client** for production workloads -2. **Use context managers** (`async with`) for proper cleanup -3. **Complete messages** after successful processing -4. **Use dead-letter queue** for poison messages -5. **Use sessions** for ordered, FIFO processing -6. **Use message batches** for high-throughput scenarios -7. **Set `max_wait_time`** to avoid infinite blocking +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async) for proper cleanup. For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use async client** for production workloads +4. **Complete messages** after successful processing +5. **Use dead-letter queue** for poison messages +6. **Use sessions** for ordered, FIFO processing +7. **Use message batches** for high-throughput scenarios +8. 
**Set `max_wait_time`** to avoid infinite blocking ## Reference Files diff --git a/.github/plugins/azure-sdk-python/skills/azure-speech-to-text-rest-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-speech-to-text-rest-py/SKILL.md index c6e6dd3d..b0f96a3e 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-speech-to-text-rest-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-speech-to-text-rest-py/SKILL.md @@ -351,12 +351,14 @@ Common language codes (see [full list](https://learn.microsoft.com/azure/ai-serv ## Best Practices -1. **Use WAV PCM 16kHz mono** for best compatibility -2. **Enable chunked transfer** for lower latency -3. **Cache access tokens** for 9 minutes (valid for 10) -4. **Specify the correct language** for accurate recognition -5. **Use detailed format** when you need confidence scores -6. **Handle all RecognitionStatus values** in production code +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients.** Use `with httpx.Client(...) as client:` (sync) or `async with httpx.AsyncClient(...) as client:` (async) so connections are pooled and closed deterministically. +3. **Use WAV PCM 16kHz mono** for best compatibility +4. **Enable chunked transfer** for lower latency +5. **Cache access tokens** for 9 minutes (valid for 10) +6. **Specify the correct language** for accurate recognition +7. **Use detailed format** when you need confidence scores +8. 
**Handle all RecognitionStatus values** in production code ## When NOT to Use This API diff --git a/.github/plugins/azure-sdk-python/skills/azure-storage-blob-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-storage-blob-py/SKILL.md index b6dc6081..e7b8c360 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-storage-blob-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-storage-blob-py/SKILL.md @@ -139,20 +139,19 @@ blob_client.delete_blob(delete_snapshots="include") ```python # Configure chunk sizes for large uploads/downloads -blob_client = BlobClient( +with BlobClient( account_url=account_url, container_name="mycontainer", blob_name="large-file.zip", credential=credential, max_block_size=4 * 1024 * 1024, # 4 MiB blocks max_single_put_size=64 * 1024 * 1024 # 64 MiB single upload limit -) - -# Parallel upload -blob_client.upload_blob(data, max_concurrency=4) +) as blob_client: + # Parallel upload + blob_client.upload_blob(data, max_concurrency=4) -# Parallel download -download_stream = blob_client.download_blob(max_concurrency=4) + # Parallel download + download_stream = blob_client.download_blob(max_concurrency=4) ``` ## SAS Tokens @@ -200,13 +199,12 @@ from azure.identity.aio import DefaultAzureCredential from azure.storage.blob.aio import BlobServiceClient async def upload_async(): - credential = DefaultAzureCredential() - - async with BlobServiceClient(account_url, credential=credential) as client: - blob_client = client.get_blob_client("mycontainer", "sample.txt") - - with open("./file.txt", "rb") as data: - await blob_client.upload_blob(data, overwrite=True) + async with DefaultAzureCredential() as credential: + async with BlobServiceClient(account_url, credential=credential) as client: + blob_client = client.get_blob_client("mycontainer", "sample.txt") + + with open("./file.txt", "rb") as data: + await blob_client.upload_blob(data, overwrite=True) # Download async async def download_async(): @@ -219,10 +217,11 @@ async def 
download_async(): ## Best Practices -1. **Use DefaultAzureCredential** instead of connection strings -2. **Use context managers** for async clients -3. **Set `overwrite=True`** explicitly when re-uploading -4. **Use `max_concurrency`** for large file transfers -5. **Prefer `readinto()`** over `readall()` for memory efficiency -6. **Use `walk_blobs()`** for hierarchical listing -7. **Set appropriate content types** for web-served blobs +1. **Pick sync OR async and stay consistent.** Do not mix `azure.storage.blob` sync clients with `azure.storage.blob.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with BlobServiceClient(...) as client:` (sync) or `async with BlobServiceClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use `DefaultAzureCredential`** for code that runs locally (instead of connection strings). Use a specific token credential for code that runs in Azure. +4. **Set `overwrite=True`** explicitly when re-uploading +5. **Use `max_concurrency`** for large file transfers +6. **Prefer `readinto()`** over `readall()` for memory efficiency +7. **Use `walk_blobs()`** for hierarchical listing +8. 
**Set appropriate content types** for web-served blobs diff --git a/.github/plugins/azure-sdk-python/skills/azure-storage-file-datalake-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-storage-file-datalake-py/SKILL.md index d8de6c90..a9274fe7 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-storage-file-datalake-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-storage-file-datalake-py/SKILL.md @@ -191,19 +191,18 @@ from azure.storage.filedatalake.aio import DataLakeServiceClient from azure.identity.aio import DefaultAzureCredential async def datalake_operations(): - credential = DefaultAzureCredential() - - async with DataLakeServiceClient( - account_url="https://.dfs.core.windows.net", - credential=credential - ) as service_client: - file_system_client = service_client.get_file_system_client("myfilesystem") - file_client = file_system_client.get_file_client("test.txt") - - await file_client.upload_data(b"async content", overwrite=True) - - download = await file_client.download_file() - content = await download.readall() + async with DefaultAzureCredential() as credential: + async with DataLakeServiceClient( + account_url="https://.dfs.core.windows.net", + credential=credential + ) as service_client: + file_system_client = service_client.get_file_system_client("myfilesystem") + file_client = file_system_client.get_file_client("test.txt") + + await file_client.upload_data(b"async content", overwrite=True) + + download = await file_client.download_file() + content = await download.readall() import asyncio asyncio.run(datalake_operations()) @@ -211,10 +210,12 @@ asyncio.run(datalake_operations()) ## Best Practices -1. **Use hierarchical namespace** for file system semantics -2. **Use `append_data` + `flush_data`** for large file uploads -3. **Set ACLs at directory level** and inherit to children -4. **Use async client** for high-throughput scenarios -5. **Use `get_paths` with `recursive=True`** for full directory listing -6. 
**Set metadata** for custom file attributes -7. **Consider Blob API** for simple object storage use cases +1. **Pick sync OR async and stay consistent.** Do not mix `azure.storage.filedatalake` sync clients with `azure.storage.filedatalake.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with DataLakeServiceClient(...) as client:` (sync) or `async with DataLakeServiceClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use hierarchical namespace** for file system semantics +4. **Use `append_data` + `flush_data`** for large file uploads +5. **Set ACLs at directory level** and inherit to children +6. **Use async client** for high-throughput scenarios +7. **Use `get_paths` with `recursive=True`** for full directory listing +8. **Set metadata** for custom file attributes +9. 
**Consider Blob API** for simple object storage use cases diff --git a/.github/plugins/azure-sdk-python/skills/azure-storage-file-share-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-storage-file-share-py/SKILL.md index 1a53c673..66f43e98 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-storage-file-share-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-storage-file-share-py/SKILL.md @@ -217,16 +217,12 @@ from azure.storage.fileshare.aio import ShareServiceClient from azure.identity.aio import DefaultAzureCredential async def upload_file(): - credential = DefaultAzureCredential() - service = ShareServiceClient(account_url, credential=credential) - - share = service.get_share_client("my-share") - file_client = share.get_file_client("test.txt") - - await file_client.upload_file("Hello!") - - await service.close() - await credential.close() + async with DefaultAzureCredential() as credential: + async with ShareServiceClient(account_url, credential=credential) as service: + share = service.get_share_client("my-share") + file_client = share.get_file_client("test.txt") + + await file_client.upload_file("Hello!") ``` ## Client Types @@ -240,10 +236,11 @@ async def upload_file(): ## Best Practices -1. **Use connection string** for simplest setup -2. **Use Entra ID** for production with RBAC -3. **Stream large files** using chunks() to avoid memory issues -4. **Create snapshots** before major changes -5. **Set quotas** to prevent unexpected storage costs -6. **Use ranges** for partial file updates -7. **Close async clients** explicitly +1. **Pick sync OR async and stay consistent.** Do not mix `azure.storage.fileshare` sync clients with `azure.storage.fileshare.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with ShareServiceClient(...) as client:` (sync) or `async with ShareServiceClient(...) as client:` (async). 
For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use connection string** for simplest setup +4. **Use Microsoft Entra ID** for production with RBAC +5. **Stream large files** using chunks() to avoid memory issues +6. **Create snapshots** before major changes +7. **Set quotas** to prevent unexpected storage costs +8. **Use ranges** for partial file updates diff --git a/.github/plugins/azure-sdk-python/skills/azure-storage-queue-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/azure-storage-queue-py/SKILL.md index 4bd073f0..fa26edf5 100644 --- a/.github/plugins/azure-sdk-python/skills/azure-storage-queue-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/azure-storage-queue-py/SKILL.md @@ -199,24 +199,25 @@ asyncio.run(queue_operations()) from azure.storage.queue import QueueClient, BinaryBase64EncodePolicy, BinaryBase64DecodePolicy # For binary data -queue_client = QueueClient( +with QueueClient( account_url=account_url, queue_name="myqueue", credential=credential, message_encode_policy=BinaryBase64EncodePolicy(), message_decode_policy=BinaryBase64DecodePolicy() -) - -# Send bytes -queue_client.send_message(b"Binary content") +) as queue_client: + # Send bytes + queue_client.send_message(b"Binary content") ``` ## Best Practices -1. **Delete messages after processing** to prevent reprocessing -2. **Set appropriate visibility timeout** based on processing time -3. **Handle `dequeue_count`** for poison message detection -4. **Use async client** for high-throughput scenarios -5. **Use `peek_messages`** for monitoring without affecting queue -6. **Set `time_to_live`** to prevent stale messages -7. **Consider Service Bus** for advanced features (sessions, topics) +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. 
**Always use context managers for clients and async credentials.** Wrap every client in `with QueueClient(...) as client:` (sync) or `async with QueueClient(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Delete messages after processing** to prevent reprocessing +4. **Set appropriate visibility timeout** based on processing time +5. **Handle `dequeue_count`** for poison message detection +6. **Use async client** for high-throughput scenarios +7. **Use `peek_messages`** for monitoring without affecting queue +8. **Set `time_to_live`** to prevent stale messages +9. **Consider Service Bus** for advanced features (sessions, topics) diff --git a/.github/plugins/azure-sdk-python/skills/fastapi-router-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/fastapi-router-py/SKILL.md index f6bff23c..0d6f32e6 100644 --- a/.github/plugins/azure-sdk-python/skills/fastapi-router-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/fastapi-router-py/SKILL.md @@ -54,3 +54,8 @@ async def list_items() -> list[Item]: 3. Create corresponding Pydantic models 4. Create service layer if needed 5. Add frontend API functions + +## Best Practices + +1. **Pick `def` or `async def` per endpoint based on whether you call async I/O;** do not mix sync and async blocking calls in one handler. +2. **Manage long-lived resources (DB pools, HTTP clients) in `lifespan` and inject via `Depends`;** use `with`/`async with` for per-request resources.
diff --git a/.github/plugins/azure-sdk-python/skills/hosted-agents-v2-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/hosted-agents-v2-py/SKILL.md index 2df659b1..4685a81b 100644 --- a/.github/plugins/azure-sdk-python/skills/hosted-agents-v2-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/hosted-agents-v2-py/SKILL.md @@ -299,50 +299,48 @@ from azure.ai.projects.models import ( ) from azure.identity import DefaultAzureCredential - -project = AIProjectClient( +with AIProjectClient( endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), allow_preview=True, -) - -agent = project.agents.create_version( - agent_name="my-hosted-agent", - definition=HostedAgentDefinition( - container_protocol_versions=[ - ProtocolVersionRecord( - protocol=AgentProtocol.RESPONSES, - version="1.0.0", - ), - ProtocolVersionRecord( - protocol=AgentProtocol.INVOCATIONS, - version="1.0.0", - ), - ], - image="myregistry.azurecr.io/my-agent:v1", - cpu="1", - memory="2Gi", - environment_variables={ - "AZURE_AI_MODEL_DEPLOYMENT_NAME": os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - }, - ), -) - -while True: - version = project.agents.get_version( +) as project: + agent = project.agents.create_version( agent_name="my-hosted-agent", - agent_version=agent.version, + definition=HostedAgentDefinition( + container_protocol_versions=[ + ProtocolVersionRecord( + protocol=AgentProtocol.RESPONSES, + version="1.0.0", + ), + ProtocolVersionRecord( + protocol=AgentProtocol.INVOCATIONS, + version="1.0.0", + ), + ], + image="myregistry.azurecr.io/my-agent:v1", + cpu="1", + memory="2Gi", + environment_variables={ + "AZURE_AI_MODEL_DEPLOYMENT_NAME": os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + }, + ), ) - status = version["status"] - if status == "active": - break - if status == "failed": - raise RuntimeError(f"Hosted agent provisioning failed: {version['error']}") - time.sleep(5) - -openai_client = project.get_openai_client(agent_name="my-hosted-agent") -response = 
openai_client.responses.create(input="Hello!") -print(response.output_text) + + while True: + version = project.agents.get_version( + agent_name="my-hosted-agent", + agent_version=agent.version, + ) + status = version["status"] + if status == "active": + break + if status == "failed": + raise RuntimeError(f"Hosted agent provisioning failed: {version['error']}") + time.sleep(5) + + openai_client = project.get_openai_client(agent_name="my-hosted-agent") + response = openai_client.responses.create(input="Hello!") + print(response.output_text) ``` For Invocations over REST, call the dedicated endpoint: @@ -427,14 +425,16 @@ REST calls to hosted-agent endpoints require `Foundry-Features: HostedAgents=V1P ## Best Practices -1. Start with Responses unless the caller requires arbitrary JSON, custom SSE, or non-OpenAI protocol semantics. -2. Use Agent Framework for new Python/C# agents; use protocol libraries for existing frameworks or custom code. -3. Test locally on `http://localhost:8088/responses` or `http://localhost:8088/invocations` before deployment. -4. Use immutable image tags, not `latest`, for production versions. -5. Keep secrets out of images and environment variables; prefer managed identity and managed connections. -6. Treat each agent version as immutable. Create a new version for runtime changes. -7. Use Application Insights and OpenTelemetry from the protocol libraries for logs, traces, and metrics. -8. Clean up with `azd down`, `project.agents.delete_version(...)`, or REST delete calls when finished. +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). 
For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. Start with Responses unless the caller requires arbitrary JSON, custom SSE, or non-OpenAI protocol semantics. +4. Use Agent Framework for new Python/C# agents; use protocol libraries for existing frameworks or custom code. +5. Test locally on `http://localhost:8088/responses` or `http://localhost:8088/invocations` before deployment. +6. Use immutable image tags, not `latest`, for production versions. +7. Keep secrets out of images and environment variables; prefer managed identity and managed connections. +8. Treat each agent version as immutable. Create a new version for runtime changes. +9. Use Application Insights and OpenTelemetry from the protocol libraries for logs, traces, and metrics. +10. Clean up with `azd down`, `project.agents.delete_version(...)`, or REST delete calls when finished. ## Reference Links diff --git a/.github/plugins/azure-sdk-python/skills/m365-agents-py/SKILL.md b/.github/plugins/azure-sdk-python/skills/m365-agents-py/SKILL.md index f6499f0e..3e9d2c7b 100644 --- a/.github/plugins/azure-sdk-python/skills/m365-agents-py/SKILL.md +++ b/.github/plugins/azure-sdk-python/skills/m365-agents-py/SKILL.md @@ -325,14 +325,16 @@ asyncio.run(main()) ## Best Practices -1. Use `microsoft_agents` import prefix (underscores, not dots). -2. Use `MemoryStorage` only for development; use BlobStorage or CosmosDB in production. -3. Always use `load_configuration_from_env(environ)` to load SDK configuration. -4. Include `jwt_authorization_middleware` in aiohttp Application middlewares. -5. Use `MsalConnectionManager` for MSAL-based authentication. -6. Call `end_stream()` in finally blocks when using streaming responses. -7. Use `auth_handlers` parameter on message decorators for OAuth-protected routes. -8. Keep secrets in environment variables, not in source code. +1. 
**This skill is async-first (aiohttp-based).** Use async handlers and `async with` for aiohttp sessions. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. Use `microsoft_agents` import prefix (underscores, not dots). +4. Use `MemoryStorage` only for development; use BlobStorage or CosmosDB in production. +5. Always use `load_configuration_from_env(environ)` to load SDK configuration. +6. Include `jwt_authorization_middleware` in aiohttp Application middlewares. +7. Use `MsalConnectionManager` for MSAL-based authentication. +8. Call `end_stream()` in finally blocks when using streaming responses. +9. Use `auth_handlers` parameter on message decorators for OAuth-protected routes. +10. Keep secrets in environment variables, not in source code. ## Reference Links diff --git a/.github/skills/skill-creator/SKILL.md b/.github/skills/skill-creator/SKILL.md index a80940ac..3a5588a6 100644 --- a/.github/skills/skill-creator/SKILL.md +++ b/.github/skills/skill-creator/SKILL.md @@ -175,11 +175,45 @@ Azure SDKs use consistent verbs across all languages: See `references/azure-sdk-patterns.md` for detailed patterns including: -- **Python**: `ItemPaged`, `LROPoller`, context managers, Sphinx docstrings +- **Python**: `ItemPaged`, `LROPoller`, context managers, Sphinx docstrings. Pick **sync or async** for the whole skill and stick with it — never mix. Always show `with` / `async with` context managers first. 
- **.NET**: `Response`, `Pageable`, `Operation`, mocking support - **Java**: Builder pattern, `PagedIterable`/`PagedFlux`, Reactor types - **TypeScript**: `PagedAsyncIterableIterator`, `AbortSignal`, browser considerations +### Required Best Practices in Every Python Skill (User-Facing) + +> **Scope:** This section applies **only to Python skills** (`-py` suffix). The sync/async and context-manager rules below are Python-specific idioms. Skills for .NET, Java, and TypeScript should follow their own language idioms and are not required to include these items. + +**These rules are not just authoring conventions for the skill itself — they MUST be explicitly written into every generated Python skill's `## Best Practices` section so end users who follow the skill apply them in their own code.** + +Add both items verbatim (adapted only for SDK specifics) as the **first two items** of the Best Practices list. Do not assume users will infer them from examples. + +**Standard wording (Python):** + +```markdown +1. **Pick sync OR async and stay consistent.** Do not mix `azure.xxx` sync clients with `azure.xxx.aio` async clients in the same call path. Choose one mode per module. +2. **Always use context managers for clients and async credentials.** Wrap every client in `with Client(...) as client:` (sync) or `async with Client(...) as client:` (async). For async `DefaultAzureCredential` from `azure.identity.aio`, also use `async with credential:` so tokens and transports are cleaned up. +3. **Use `DefaultAzureCredential`** for code that runs locally. Use a specific token credential (e.g. `ManagedIdentityCredential`, `WorkloadIdentityCredential`) for code that runs in Azure. +``` + +**Variants to apply when the SDK shape differs:** + +| Skill type | Adjust item #1 to | Adjust item #2 to | +|---|---|---| +| Async-only SDK (e.g. voicelive) | "This SDK is async-only; use the `.aio` namespace throughout." 
| keep standard | +| Async-first framework (agent framework, m365-agents) | "This SDK is async-first — use `async def` handlers and `async with` throughout." | keep standard | +| Provider-pattern (OpenTelemetry exporters/distro) | keep standard | "Call `provider.shutdown()` / `flush()` at process exit to flush telemetry — providers are not context managers." | +| REST-over-httpx skills | keep standard | "Use `with httpx.Client(...) as client:` (sync) or `async with httpx.AsyncClient(...) as client:` (async) so connections pool and close deterministically." | +| Identity skill | keep standard | "Use credentials as context managers (`with DefaultAzureCredential() as credential:`) when they own token caches / HTTP transports you want cleaned up; for async, use `async with` on credentials from `azure.identity.aio`." | +| FastAPI (non-Azure) | "Pick `def` or `async def` per endpoint based on whether you call async I/O; do not mix sync and blocking calls in one handler." | "Manage long-lived resources (DB pools, HTTP clients) in `lifespan` and inject via `Depends`; use `with`/`async with` for per-request resources." | +| Pure model/schema skill (no I/O, e.g. pydantic) | **skip both** — not applicable | **skip** | + +**Enforcement in code examples.** Every code example inside the skill must itself obey both rules, so the skill demonstrates what it prescribes: + +- Do not show sync and async calls interleaved in the same example. If you must show both modes, keep the primary example in one mode and isolate the alternative into a single `### Async variant` (or `### Sync variant`) subsection with its own complete example. +- Every client instantiation in every example must be wrapped in `with` / `async with`. The only permitted exception is the mandatory Authentication snippet (which illustrates the credential + client construction pattern) and framework lifespan patterns where a client is owned by the app (e.g. FastAPI `lifespan`). 
+- When async credentials from `azure.identity.aio` appear in an example, wrap them in `async with credential:` alongside the client. + ### Handling Deprecated or Rebranded SDKs When an Azure SDK has been deprecated or rebranded, update skills to guide users toward the current package while maintaining backward compatibility: @@ -678,6 +712,8 @@ azure-ai-agents/ | Skip acceptance criteria | Skills without tests can't be validated | | Skip symlink categorization | Skills won't be discoverable by category | | Use wrong import paths | Azure SDKs have specific module structures | +| Omit sync/async + context-manager bullets from Best Practices in Python skills | End users won't follow rules that aren't written down; examples alone aren't enough | +| Mix sync and async in the same Python example | Demonstrates the anti-pattern the skill is supposed to prevent | --- @@ -697,6 +733,8 @@ Before completing a skill: - [ ] Authentication uses `DefaultAzureCredential` - [ ] Includes cleanup/delete in examples - [ ] References organized by feature +- [ ] **(Python skills only) Best Practices section contains the two user-facing rules** (sync-or-async consistency + context managers for clients and async credentials), using the variant matched to the skill type +- [ ] **(Python skills only)** Every code example obeys both rules (no mixed sync/async; every client wrapped in `with` / `async with`) **Categorization:** diff --git a/.github/skills/skill-creator/references/azure-sdk-patterns.md b/.github/skills/skill-creator/references/azure-sdk-patterns.md index 057543dd..6c78b596 100644 --- a/.github/skills/skill-creator/references/azure-sdk-patterns.md +++ b/.github/skills/skill-creator/references/azure-sdk-patterns.md @@ -80,6 +80,44 @@ class AsyncConfigurationClient: pass ``` +### Sync vs Async: Pick One, Don't Mix + +**Rule:** Within a single module, script, or code path, use **either** the sync client **or** the async client — never both. 
+ +- Sync clients live in `azure.` (e.g., `azure.ai.projects.AIProjectClient`). +- Async clients live in `azure..aio` (e.g., `azure.ai.projects.aio.AIProjectClient`). +- Mixing sync calls inside an `async def` (or awaiting inside a sync function) blocks the event loop, breaks context managers, and produces subtle concurrency bugs. + +```python +# ✅ Good — all sync +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +with AIProjectClient(endpoint=endpoint, credential=DefaultAzureCredential()) as client: + agent = client.agents.get_agent("agent-id") + +# ✅ Good — all async +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import DefaultAzureCredential + +async with DefaultAzureCredential() as credential, \ + AIProjectClient(endpoint=endpoint, credential=credential) as client: + agent = await client.agents.get_agent("agent-id") + +# ❌ Bad — sync client inside async function +async def run(): + from azure.ai.projects import AIProjectClient # sync! + client = AIProjectClient(endpoint=endpoint, credential=cred) + agent = client.agents.get_agent("agent-id") # blocks the event loop + +# ❌ Bad — mixing sync and async credentials +from azure.identity import DefaultAzureCredential # sync +from azure.ai.projects.aio import AIProjectClient # async +# Async client must be paired with azure.identity.aio.DefaultAzureCredential +``` + +When writing a skill, pick one model based on the target runtime (FastAPI/async framework → async; scripts/CLIs → sync) and make every example in the skill consistent with that choice. 
+ ### Pagination: ItemPaged / AsyncItemPaged ```python @@ -123,18 +161,33 @@ async_poller = await async_client.begin_create_resource(config) result = await async_poller.result() ``` -### Context Managers +### Context Managers (Strongly Preferred) + +**Always prefer context managers (`with` / `async with`) over manually constructing and closing clients.** They guarantee the underlying HTTP transport and credential sessions are closed, even on exceptions, and make the sync/async choice explicit at the call site. ```python -# Recommended pattern +# ✅ Preferred — sync with ConfigurationClient(endpoint, credential) as client: setting = client.get_setting("key") -# Async -async with AsyncConfigurationClient(endpoint, credential) as client: +# ✅ Preferred — async (also wrap the async credential) +from azure.identity.aio import DefaultAzureCredential + +async with DefaultAzureCredential() as credential, \ + AsyncConfigurationClient(endpoint, credential) as client: setting = await client.get_setting("key") + +# ⚠️ Only acceptable when the client lifetime spans the whole app +# (e.g., FastAPI lifespan, long-running service). Close it explicitly. +client = ConfigurationClient(endpoint, credential) +try: + setting = client.get_setting("key") +finally: + client.close() # or `await client.close()` for async clients ``` +Skills should show the context-manager form first. Only introduce the explicit `close()` pattern when the scenario genuinely requires a long-lived client (e.g., dependency-injected singletons), and always pair it with `try/finally` or a framework lifecycle hook. + ### Error Handling ```python diff --git a/docs/index.html b/docs/index.html index 015701d5..7a8c819e 100644 --- a/docs/index.html +++ b/docs/index.html @@ -20,7 +20,7 @@ Docs GitHub -
⌘K
Showing 140 skills

agent-framework-azure-ai-py

Python

Build Azure AI Foundry agents using the Microsoft Agent Framework Python SDK (agent-framework-azure-ai). Use when creating persistent agents with AzureAIAgentsProvider, using hosted tools (code interpreter, file search, web search), integrating MCP servers, managing conversation threads, or implementing streaming responses. Covers function tools, structured outputs, and multi-tool agents.

general

agents-v2-py

Python

Maintain legacy initial-preview Foundry container agents built with Azure AI Projects. +

⌘K
Showing 140 skills

agent-framework-azure-ai-py

Python

Build Azure AI Foundry agents using the Microsoft Agent Framework Python SDK (agent-framework-azure-ai). Use when creating persistent agents with AzureAIAgentsProvider, using hosted tools (code interpreter, file search, web search), integrating MCP servers, managing conversation threads, or implementing streaming responses. Covers function tools, structured outputs, and multi-tool agents.

general

agents-v2-py

Python

Maintain legacy initial-preview Foundry container agents built with Azure AI Projects. Use only for pre-refreshed hosted-agent implementations. For current hosted agents with Responses or Invocations protocols, use hosted-agents-v2-py. Triggers: "legacy container agent", "initial preview hosted agent",