diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 73686adb..952dea4f 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: c33c788946fa446bfcf90b60f68abde9 + docChecksum: 52e6a68dbdc371fd28af0f2ae79505fa docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.3 - configChecksum: 0f65a9bdd8df5ae03eaaaea3ab055bf1 + releaseVersion: 1.9.6 + configChecksum: 2b88c684b4750a8f781b81adb8480b58 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -326,7 +326,6 @@ generatedFiles: - docs/models/messageoutputeventrole.md - docs/models/messageoutputeventtype.md - docs/models/messages.md - - docs/models/metadata.md - docs/models/metricout.md - docs/models/mistralpromptmode.md - docs/models/modelcapabilities.md diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index ffc6c827..140d66de 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.3 + version: 1.9.6 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 45143669..6b8e984b 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,45 +2,44 @@ speakeasyVersion: 1.568.2 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f - sourceBlobDigest: sha256:504fff788fdac8d781e33d85e3a04d35f6d9f7a3ef5ed40da8b4567074e94f03 + sourceRevisionDigest: sha256:fe4edf8ff3f2b6695fdcba82e27556b47447c101ff328555b5867b646a50e100 + sourceBlobDigest: sha256:f3b20e0832703d86d5fb58ad8d5e63f43ac61e7cdaec34457796c831e13601e3 tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:a24f9e044dff30d6ee8ff4e357565006cbccb6ff2ecb5f819fcfd97638d3e78a + sourceBlobDigest: sha256:bdc7d7691faf96dc402136d8e188a13c151d303be37f84cb92ead45d2665d32c tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad - sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 + sourceRevisionDigest: sha256:55f3f81b4bc4061fe86e7c25987282480065414f142fdedfe5cb103de7161a2d + sourceBlobDigest: sha256:f14bdfab5afcc84705d574e6fc22806c4a518292b255fec0643f1e1aa18ae58f tags: - latest - - speakeasy-sdk-regen-1753290410 targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f - sourceBlobDigest: sha256:504fff788fdac8d781e33d85e3a04d35f6d9f7a3ef5ed40da8b4567074e94f03 + sourceRevisionDigest: sha256:fe4edf8ff3f2b6695fdcba82e27556b47447c101ff328555b5867b646a50e100 + sourceBlobDigest: sha256:f3b20e0832703d86d5fb58ad8d5e63f43ac61e7cdaec34457796c831e13601e3 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:e242a7fc42e44d2bbc8e5637d4a6455da7fb3d0307dc275ee4c64867f5c4be55 + codeSamplesRevisionDigest: 
sha256:c31de11ee68db94fdc98b77bea208dc196a6607f36ce841afab236ba3f3eaf92 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:a24f9e044dff30d6ee8ff4e357565006cbccb6ff2ecb5f819fcfd97638d3e78a + sourceBlobDigest: sha256:bdc7d7691faf96dc402136d8e188a13c151d303be37f84cb92ead45d2665d32c codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:03b3e82c20d10faa8622f14696632b96b1a2e8d747b266fff345061298d5f3e4 + codeSamplesRevisionDigest: sha256:ece8cb2d2432e57967b33b464f2b1b5e788b406db117f76038a9e6f4e666357a mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad - sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 + sourceRevisionDigest: sha256:55f3f81b4bc4061fe86e7c25987282480065414f142fdedfe5cb103de7161a2d + sourceBlobDigest: sha256:f14bdfab5afcc84705d574e6fc22806c4a518292b255fec0643f1e1aa18ae58f codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:1fd9897fdd851557c592b8fd46232518359401d15a6574933c43be63ec2edb53 + codeSamplesRevisionDigest: sha256:c07eaaf90f2adb1b4d84a30270071d366fcdbff6b5e18d108dec69f751a52f53 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md index 5dc74a89..b2144e08 100644 --- a/docs/models/messageoutputcontentchunks.md +++ b/docs/models/messageoutputcontentchunks.md @@ -33,3 +33,9 @@ value: models.DocumentURLChunk = /* values here */ value: models.ToolReferenceChunk = /* values here */ ``` +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md index 2da475f7..b06d4a7f 100644 --- a/docs/models/outputcontentchunks.md +++ b/docs/models/outputcontentchunks.md @@ -33,3 +33,9 @@ value: models.DocumentURLChunk = /* values here */ value: models.ToolReferenceChunk = /* values here */ ``` +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md index 43e09050..3819236b 100644 --- a/docs/models/toolcall.md +++ b/docs/models/toolcall.md @@ -3,10 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.Metadata]](../models/metadata.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | 
---------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index 20484120..c41b8454 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -3,6 +3,8 @@ ## Overview +(beta) Libraries API - manage access to a library. + ### Available Operations * [list](#list) - List all of the access to this library. diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index 05ae6f74..0b49c05c 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -3,6 +3,8 @@ ## Overview +(beta) Libraries API - manage documents in a library. + ### Available Operations * [list](#list) - List document in a given library. diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index 9c709d0b..4b441c85 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -3,7 +3,7 @@ ## Overview -(beta) Libraries API for indexing documents to enhance agent capabilities. +(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities. ### Available Operations diff --git a/packages/mistralai_azure/.gitignore b/packages/mistralai_azure/.gitignore index 5a82b069..f2ea8c39 100644 --- a/packages/mistralai_azure/.gitignore +++ b/packages/mistralai_azure/.gitignore @@ -1,3 +1,6 @@ +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ .speakeasy/reports README-PYPI.md .venv/ diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index b7d6f3ba..3893a18f 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,18 +1,19 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 574e96caec9a63dbe3f39d646830f2c2 - docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 - releaseVersion: 1.6.0 - configChecksum: 3a4d9b204b5731c461ed7279710d5ed6 + docChecksum: d17526c21541c0521ee90699ca89d7ad + docVersion: 1.0.0 + speakeasyVersion: 1.568.2 + generationVersion: 2.634.2 + releaseVersion: 2.0.2 + configChecksum: 2b64bb6ef3657d2f0d3eab47e1464ac8 published: true features: python: additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.19.3 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -21,14 +22,14 @@ features: globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.0 + globalServerURLs: 3.1.1 methodArguments: 1.0.2 nameOverrides: 3.0.1 nullables: 1.0.1 - openEnums: 1.0.0 + openEnums: 1.0.1 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.1.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -68,6 +69,8 @@ generatedFiles: - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/metadata.md + - docs/models/mistralpromptmode.md - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md @@ -98,7 +101,6 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare_readme.py - scripts/publish.sh - 
src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py @@ -126,6 +128,7 @@ generatedFiles: - src/mistralai_azure/models/imageurl.py - src/mistralai_azure/models/imageurlchunk.py - src/mistralai_azure/models/jsonschema.py + - src/mistralai_azure/models/mistralpromptmode.py - src/mistralai_azure/models/prediction.py - src/mistralai_azure/models/referencechunk.py - src/mistralai_azure/models/responseformat.py @@ -144,12 +147,12 @@ generatedFiles: - src/mistralai_azure/models/usermessage.py - src/mistralai_azure/models/validationerror.py - src/mistralai_azure/py.typed - - src/mistralai_azure/sdk.py - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py - src/mistralai_azure/utils/__init__.py - src/mistralai_azure/utils/annotations.py + - src/mistralai_azure/utils/datetimes.py - src/mistralai_azure/utils/enums.py - src/mistralai_azure/utils/eventstreaming.py - src/mistralai_azure/utils/forms.py @@ -171,15 +174,14 @@ examples: responses: "422": application/json: {} - "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} -examplesVersion: 1.0.0 +examplesVersion: 1.0.2 generatedTests: {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index be4a1781..6caa22c4 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -4,6 +4,7 @@ generation: maintainOpenAPIOrder: true usageSnippets: optionalPropertyRendering: withExample + sdkInitStyle: constructor useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true @@ -11,17 +12,23 @@ generation: parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true securityFeb2025: false + sharedErrorComponentsApr2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: false python: - version: 1.6.0 + version: 2.0.2 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 authors: - Mistral + baseErrorName: MistralAzureError clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. 
@@ -43,8 +50,10 @@ python: inputModelSuffix: input maxMethodParams: 15 methodArguments: infer-optional-args + moduleName: "" outputModelSuffix: output packageName: mistralai_azure + pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index a9a174fb..b0f05d37 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -5,7 +5,6 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{ "role": "user", "content": "Who is the best French painter? Answer in one short sentence." }
] | | `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -13,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{ "role": "user", "content": "Who is the best French painter? Answer in one short sentence." }
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | @@ -21,4 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionresponse.md b/packages/mistralai_azure/docs/models/chatcompletionresponse.md index ad376158..a0465ffb 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionresponse.md +++ b/packages/mistralai_azure/docs/models/chatcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index b3e06e7a..90397dec 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,6 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{ "role": "user", "content": "Who is the best French painter? Answer in one short sentence." }
] | | `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -13,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{ "role": "user", "content": "Who is the best French painter? Answer in one short sentence." }
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | @@ -21,4 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionchunk.md b/packages/mistralai_azure/docs/models/completionchunk.md index b8ae6a09..7f8ab5e6 100644 --- a/packages/mistralai_azure/docs/models/completionchunk.md +++ b/packages/mistralai_azure/docs/models/completionchunk.md @@ -6,8 +6,8 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | | `id` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | -| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md index a166b7bb..b2bdb3fe 100644 --- a/packages/mistralai_azure/docs/models/function.md +++ b/packages/mistralai_azure/docs/models/function.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | -| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/mistralai_azure/docs/models/jsonschema.md index 
ae387867..7ff7c070 100644 --- a/packages/mistralai_azure/docs/models/jsonschema.md +++ b/packages/mistralai_azure/docs/models/jsonschema.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ----------------------- | ----------------------- | ----------------------- | ----------------------- | | `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/metadata.md b/packages/mistralai_azure/docs/models/metadata.md similarity index 100% rename from docs/models/metadata.md rename to packages/mistralai_azure/docs/models/metadata.md diff --git a/packages/mistralai_azure/docs/models/mistralpromptmode.md b/packages/mistralai_azure/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..7416e203 --- /dev/null +++ b/packages/mistralai_azure/docs/models/mistralpromptmode.md @@ -0,0 +1,8 @@ +# MistralPromptMode + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tool.md b/packages/mistralai_azure/docs/models/tool.md index 822f86f8..fb661f72 100644 --- a/packages/mistralai_azure/docs/models/tool.md +++ b/packages/mistralai_azure/docs/models/tool.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md index 574be1ea..43e09050 100644 --- a/packages/mistralai_azure/docs/models/toolcall.md +++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -3,9 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | 
[models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.Metadata]](../models/metadata.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolchoice.md b/packages/mistralai_azure/docs/models/toolchoice.md index 792ebcd6..373046bb 100644 --- a/packages/mistralai_azure/docs/models/toolchoice.md +++ b/packages/mistralai_azure/docs/models/toolchoice.md @@ -7,5 +7,5 @@ ToolChoice is either a ToolChoiceEnum or a ToolChoice | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usageinfo.md b/packages/mistralai_azure/docs/models/usageinfo.md index 9f56a3ae..f5204ac9 100644 --- a/packages/mistralai_azure/docs/models/usageinfo.md +++ b/packages/mistralai_azure/docs/models/usageinfo.md @@ -3,8 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 266bc815..a8fcb932 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -188,8 +188,8 @@ good-names=i, Run, _, e, - n, - id + id, + n # Good variable names regexes, separated by a comma. If names match any regex, # they will always be accepted @@ -458,7 +458,8 @@ disable=raw-checker-failed, relative-beyond-top-level, consider-using-with, wildcard-import, - unused-wildcard-import + unused-wildcard-import, + too-many-return-statements # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option @@ -659,4 +660,4 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index cca906a7..43fdb02b 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,16 +1,14 @@ [project] name = "mistralai_azure" -version = "1.6.0" +version = "2.0.2" description = "Python Client SDK for the Mistral AI API in Azure." authors = [{ name = "Mistral" },] -readme = "README-PYPI.md" -requires-python = ">=3.9" +readme = "README.md" +requires-python = ">=3.9.2" dependencies = [ - "eval-type-backport >=0.2.0", + "httpcore >=1.0.9", "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", - "typing-inspection >=0.4.0", + "pydantic >=2.11.2", ] [tool.poetry] @@ -26,11 +24,10 @@ include = ["py.typed", "src/mistralai_azure/py.typed"] in-project = true [tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" +mypy = "==1.15.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" [build-system] requires = ["poetry-core"] @@ -42,6 +39,8 @@ pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +explicit_package_bases = true +mypy_path = "src" [[tool.mypy.overrides]] module = "typing_inspect" diff --git a/packages/mistralai_azure/scripts/prepare_readme.py b/packages/mistralai_azure/scripts/prepare_readme.py deleted file mode 100644 index 825d9ded..00000000 --- a/packages/mistralai_azure/scripts/prepare_readme.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import shutil - -try: - shutil.copyfile("README.md", "README-PYPI.md") -except Exception as e: - print("Failed to copy README.md to README-PYPI.md") - print(e) diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index f2f2cf2c..1ee7194c 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -2,6 +2,4 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py - poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py index 297dfa2f..0c22d7eb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -3,10 +3,12 @@ from abc import ABC, abstractmethod import httpx from mistralai_azure.httpclient import HttpClient +from mistralai_azure.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union class HookContext: + config: SDKConfiguration base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None @@ -14,11 +16,13 @@ class HookContext: def __init__( self, + config: SDKConfiguration, base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.config = config self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes @@ -28,6 +32,7 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -38,6 +43,7 @@ def __init__(self, hook_ctx: HookContext): class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -48,6 +54,7 @@ def __init__(self, hook_ctx: HookContext): class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index 65696610..eeaa6940 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai_azure" -__version__: str = "1.6.0" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai_azure" +__version__: str = "2.0.2" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.634.2" +__user_agent__: str = "speakeasy-sdk/python 2.0.2 2.634.2 1.0.0 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 24e4935e..84738ce8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -218,12 +218,12 @@ def do_request( client = self.sdk_configuration.client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + def do(): http_res = None try: - req = 
self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -237,9 +237,7 @@ def do(): http_res = client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e @@ -257,7 +255,7 @@ def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -277,9 +275,7 @@ def do(): http_res = do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res @@ -294,12 +290,12 @@ async def do_request_async( client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + async def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -313,9 +309,7 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e @@ -333,7 +327,7 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -355,8 +349,6 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index cf3511fd..20184014 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -41,6 +41,7 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -67,6 +68,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -106,6 +108,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -138,6 +141,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -212,6 +216,7 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -238,6 +243,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -277,6 +283,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -309,6 +316,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -391,6 +399,7 @@ def complete( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -415,6 +424,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -456,6 +466,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -488,6 +499,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], @@ -566,6 +578,7 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -590,6 +603,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -631,6 +645,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -663,6 +678,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 1e426352..47b052cb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -2,7 +2,6 @@ # pyright: reportReturnType = false import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -116,21 +115,12 @@ def close_clients( pass if async_client is not None and not async_client_supplied: - is_async = False try: - asyncio.get_running_loop() - is_async = True + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. - if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: + try: asyncio.run(async_client.aclose()) - except Exception: - pass + except RuntimeError: + # best effort + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index 2229c469..8d36ddb9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -1,109 +1,122 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, -) -from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceFinishReason, - ChatCompletionChoiceTypedDict, -) -from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, - ChatCompletionRequestStop, - ChatCompletionRequestStopTypedDict, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, -) -from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, -) -from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, -) -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, - FinishReason, -) -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .function import Function, FunctionTypedDict -from .functioncall import ( - Arguments, - ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, -) -from .functionname import FunctionName, FunctionNameTypedDict -from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .imageurl import ImageURL, ImageURLTypedDict -from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, -) -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .responseformats import ResponseFormats -from .sdkerror import SDKError -from .security import Security, SecurityTypedDict -from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, -) -from .textchunk import TextChunk, TextChunkTypedDict, Type -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, -) -from .tooltypes import ToolTypes -from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, -) -from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, -) +from typing import TYPE_CHECKING +from importlib import import_module +if TYPE_CHECKING: + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageRole, + AssistantMessageTypedDict, + ) + from .chatcompletionchoice import ( + 
ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict + from .deltamessage import ( + Content, + ContentTypedDict, + DeltaMessage, + DeltaMessageTypedDict, + ) + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ( + ReferenceChunk, + ReferenceChunkType, + ReferenceChunkTypedDict, + ) + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .sdkerror import SDKError + from .security import Security, SecurityTypedDict + from .systemmessage import ( + Role, + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict, Type + from .tool import Tool, ToolTypedDict + from .toolcall import Metadata, MetadataTypedDict, ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) __all__ = [ "Arguments", @@ -164,6 +177,9 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "Metadata", + "MetadataTypedDict", + "MistralPromptMode", "Prediction", "PredictionTypedDict", "ReferenceChunk", @@ -208,3 +224,136 @@ "ValidationError", "ValidationErrorTypedDict", ] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": 
".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageRole": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessages": ".chatcompletionrequest", + "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "Messages": ".chatcompletionstreamrequest", + "MessagesTypedDict": ".chatcompletionstreamrequest", + "Stop": ".chatcompletionstreamrequest", + "StopTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "FinishReason": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "Content": ".deltamessage", + "ContentTypedDict": ".deltamessage", + "DeltaMessage": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkImageURL": ".imageurlchunk", + "ImageURLChunkImageURLTypedDict": ".imageurlchunk", + "ImageURLChunkType": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": ".mistralpromptmode", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkType": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "SDKError": ".sdkerror", + "Security": ".security", + "SecurityTypedDict": ".security", + "Role": ".systemmessage", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "TextChunk": ".textchunk", + 
"TextChunkTypedDict": ".textchunk", + "Type": ".textchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "Metadata": ".toolcall", + "MetadataTypedDict": ".toolcall", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageRole": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageRole": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " + ) + + try: + module = import_module(module_name, __package__) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 530b33df..86f5ec09 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -56,7 +56,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index f48c1f50..8dffe1bd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator +from mistralai_azure.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -96,6 +98,8 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: 
NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -144,6 +148,11 @@ class ChatCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -165,16 +174,24 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py index ecd85d5c..7a66f322 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_azure.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class ChatCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class ChatCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class ChatCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class ChatCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 50cf1f01..5fced93e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator +from 
mistralai_azure.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -91,6 +93,8 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -138,6 +142,11 @@ class ChatCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -159,16 +168,24 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py index 37294d9b..1a492204 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -38,7 +38,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py index 112eb127..7fa3c3f2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -46,7 +46,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py b/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py index 8faa272b..a5a66360 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py @@ -32,7 +32,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) 
serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py index b2d07d3a..0f7563fc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py @@ -40,7 +40,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py new file mode 100644 index 00000000..bd4584a5 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index cfd58dcf..6d09de5b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -37,7 +37,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 6ccdcaa2..40216701 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -3,18 +3,34 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_azure.utils import validate_open_enum +from pydantic import model_serializer from pydantic.functional_validators import PlainValidator from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict +class MetadataTypedDict(TypedDict): + pass + + +class Metadata(BaseModel): + pass + + class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] index: NotRequired[int] + metadata: NotRequired[Nullable[MetadataTypedDict]] class ToolCall(BaseModel): @@ -27,3 +43,35 @@ class ToolCall(BaseModel): ) index: Optional[int] = 0 + + metadata: OptionalNullable[Metadata] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["id", "type", "index", "metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not 
None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index 3e9aa3da..abca8abe 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -51,7 +51,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py index b1d094fc..bbe5cdfa 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py @@ -1,19 +1,82 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing_extensions import TypedDict +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class UsageInfoTypedDict(TypedDict): - prompt_tokens: int - completion_tokens: int - total_tokens: int + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] class UsageInfo(BaseModel): - prompt_tokens: int + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - completion_tokens: int + prompt_tokens: Optional[int] = 0 - total_tokens: int + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + nullable_fields = ["prompt_audio_seconds"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index 8cce1745..05976fc0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py 
+++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -39,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 605e5d74..51289cf0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from ._hooks import SDKHooks from ._version import ( __gen_version__, __openapi_doc_version__, @@ -42,9 +41,6 @@ class SDKConfiguration: retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None - def __post_init__(self): - self._hooks = SDKHooks() - def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} @@ -55,6 +51,3 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - - def get_hooks(self) -> SDKHooks: - return self._hooks diff --git a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py b/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py index a6187efa..231c2e37 100644 --- a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py +++ b/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py @@ -2,7 +2,7 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +35,5 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt = NewType("UnrecognizedInt", int) -UnrecognizedStr = NewType("UnrecognizedStr", str) +UnrecognizedInt: TypeAlias = int +UnrecognizedStr: TypeAlias = str diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 3cded8fe..dd4aa4b3 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -1,50 +1,55 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .annotations import get_discriminator -from .enums import OpenEnumMeta -from .headers import get_headers, get_response_headers -from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, -) -from .queryparams import get_query_params -from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig -from .requestbodies import serialize_request_body, SerializedRequestBody -from .security import get_security -from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, -) -from .url import generate_url, template_url, remove_suffix -from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, -) -from .logger import Logger, get_body_content, get_default_logger +from typing import TYPE_CHECKING +from importlib import import_module + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security + from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger __all__ = [ "BackoffStrategy", @@ -55,6 +60,7 @@ "get_body_content", "get_default_logger", "get_discriminator", + "parse_datetime", "get_global_from_env", "get_headers", "get_pydantic_model", @@ -97,3 +103,82 @@ "validate_open_enum", "cast_partial", ] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + "OpenEnumMeta": ".enums", + 
"PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": ".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "validate_open_enum": ".serializers", + "cast_partial": ".values", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " + ) + + try: + module = import_module(module_name, __package__) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py b/packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py new file mode 100644 index 00000000..a6c52cd6 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +import sys + + +def parse_datetime(datetime_string: str) -> datetime: + """ + Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. + Python versions 3.11 and later support parsing RFC 3339 directly with + datetime.fromisoformat(), but for earlier versions, this function + encapsulates the necessary extra logic. + """ + # Python 3.11 and later can parse RFC 3339 directly + if sys.version_info >= (3, 11): + return datetime.fromisoformat(datetime_string) + + # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, + # so fix that upfront. + if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py b/packages/mistralai_azure/src/mistralai_azure/utils/enums.py index c650b10c..c3bc13cf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/enums.py @@ -1,34 +1,74 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" import enum - +import sys class OpenEnumMeta(enum.EnumMeta): - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. 
Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. + # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py index 0472aba8..e873495f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py @@ -86,11 +86,39 @@ def _populate_form( return form +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + def serialize_multipart_form( media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: form: Dict[str, Any] = {} - files: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] if not isinstance(request, BaseModel): raise TypeError("invalid request body type") @@ -112,39 +140,32 @@ def serialize_multipart_form( f_name = field.alias if 
field.alias else name if field_metadata.file: - file_fields: Dict[str, FieldInfo] = val.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] + if isinstance(val, List): + # Handle array of files + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type = _extract_file_properties(file_obj) - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue + if content_type is not None: + files.append((f_name + "[]", (file_name, content, content_type))) + else: + files.append((f_name + "[]", (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) - if file_metadata.content: - content = getattr(val, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(val, file_field_name, None) + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) else: - file_name = getattr(val, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - if content_type is not None: - files[f_name] = (file_name, content, content_type) - else: - files[f_name] = (file_name, content) + files.append((f_name, (file_name, content))) elif field_metadata.json: - files[f_name] = ( + files.append((f_name, ( None, marshal_json(val, request_field_types[name]), "application/json", - ) + ))) else: if isinstance(val, List): values = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index baa41fbd..76e44d71 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -1,13 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from decimal import Decimal +import functools import json -from typing import Any, Dict, List, Union, get_args -import httpx +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions from typing_extensions import get_origin + +import httpx from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset @@ -185,6 +188,13 @@ def is_nullable(field): return False +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. + """ + return any(obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union")) + + def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) @@ -217,3 +227,22 @@ def _contains_pydantic_model(data: Any) -> bool: return any(_contains_pydantic_model(value) for value in data.values()) return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. 
+ Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result + diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore index 5a82b069..f2ea8c39 100644 --- a/packages/mistralai_gcp/.gitignore +++ b/packages/mistralai_gcp/.gitignore @@ -1,3 +1,6 @@ +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ .speakeasy/reports README-PYPI.md .venv/ diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 5e157235..83de00d9 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,18 +1,19 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 28fe1ab59b4dee005217f2dbbd836060 - docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 - releaseVersion: 1.6.0 - configChecksum: 66bf5911f59189922e03a75a72923b32 + docChecksum: c01cfc13b9e60701ce05c06c9d8ba0f7 + docVersion: 1.0.0 + speakeasyVersion: 1.568.2 + generationVersion: 2.634.2 + releaseVersion: 2.0.2 + configChecksum: c77c9fa3c478f7a98bb4c38b0103091b published: true features: python: additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.19.3 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -21,14 +22,14 @@ features: globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.0 + globalServerURLs: 3.1.1 methodArguments: 1.0.2 nameOverrides: 3.0.1 nullables: 1.0.1 - openEnums: 1.0.0 + openEnums: 1.0.1 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.1.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -73,6 +74,8 @@ generatedFiles: - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/metadata.md + - docs/models/mistralpromptmode.md - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md @@ -103,7 +106,6 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py - src/mistralai_gcp/_hooks/__init__.py @@ -135,6 +137,7 @@ generatedFiles: - src/mistralai_gcp/models/imageurl.py - src/mistralai_gcp/models/imageurlchunk.py - src/mistralai_gcp/models/jsonschema.py + - src/mistralai_gcp/models/mistralpromptmode.py - src/mistralai_gcp/models/prediction.py - src/mistralai_gcp/models/referencechunk.py - src/mistralai_gcp/models/responseformat.py @@ -153,12 +156,12 @@ generatedFiles: - src/mistralai_gcp/models/usermessage.py - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed - - src/mistralai_gcp/sdk.py - src/mistralai_gcp/sdkconfiguration.py - src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py - src/mistralai_gcp/utils/__init__.py - src/mistralai_gcp/utils/annotations.py + - src/mistralai_gcp/utils/datetimes.py - src/mistralai_gcp/utils/enums.py - src/mistralai_gcp/utils/eventstreaming.py - src/mistralai_gcp/utils/forms.py @@ -180,14 +183,13 @@ examples: responses: "422": application/json: {} - "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: 
application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} stream_fim: @@ -197,15 +199,14 @@ examples: responses: "422": application/json: {} - "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} -examplesVersion: 1.0.0 +examplesVersion: 1.0.2 generatedTests: {} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index d7be7fed..568b008d 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -4,6 +4,7 @@ generation: maintainOpenAPIOrder: true usageSnippets: optionalPropertyRendering: withExample + sdkInitStyle: constructor useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true @@ -11,11 +12,16 @@ generation: parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true securityFeb2025: false + sharedErrorComponentsApr2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: false python: - version: 1.6.0 + version: 2.0.2 additionalDependencies: dev: pytest: ^8.2.2 @@ -25,6 +31,7 @@ python: requests: ^2.32.3 authors: - Mistral + baseErrorName: MistralGcpError clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. 
@@ -46,8 +53,10 @@ python: inputModelSuffix: input maxMethodParams: 15 methodArguments: infer-optional-args + moduleName: "" outputModelSuffix: output packageName: mistralai-gcp + pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index 9d735d08..02a3a069 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -6,13 +6,13 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | @@ -20,4 +20,5 @@ | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md index ad376158..a0465ffb 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index 827943cd..cad020f1 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -6,13 +6,13 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | @@ -20,4 +20,5 @@ | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionchunk.md b/packages/mistralai_gcp/docs/models/completionchunk.md index b8ae6a09..7f8ab5e6 100644 --- a/packages/mistralai_gcp/docs/models/completionchunk.md +++ b/packages/mistralai_gcp/docs/models/completionchunk.md @@ -6,8 +6,8 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | | `id` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | -| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 7507b90c..7b785cf0 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -6,12 +6,12 @@ | Field | Type | Required | Description | Example | | 
---------- | ---------- | ---------- | ---------- | ---------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
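
To make the FIM request fields above concrete, here is a minimal completion sketch against the GCP package. It is illustrative only: the `MistralGoogleCloud` client wiring follows the package README conventions, and the `region`/`project_id` values are placeholder assumptions.

```python
# Illustrative FIM call; region and project_id are placeholder assumptions.
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")

res = client.fim.complete(
    model="codestral-2405",  # FIM currently supports only the codestral models
    prompt="def",            # the text/code to complete
    suffix="return a+b",     # the model fills in between prompt and suffix
    max_tokens=64,
)
print(res.choices[0].message.content)
```
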
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md index da786a1f..cd62d034 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index 6cc439c7..d49a6301 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -6,12 +6,12 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
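
The streaming variant takes the same fields; a short sketch, reusing the client from the previous example and assuming the SDK's documented `with ... as event_stream` streaming pattern:

```python
# Illustrative streaming FIM call; assumes the client from the sketch above.
res = client.fim.stream(
    model="codestral-2405",
    prompt="def",
    suffix="return a+b",
)
with res as event_stream:
    for event in event_stream:
        # event.data is a CompletionChunk; the delta content can be empty.
        if event.data.choices:
            delta = event.data.choices[0].delta.content
            if isinstance(delta, str):
                print(delta, end="")
```
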
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md index a166b7bb..b2bdb3fe 100644 --- a/packages/mistralai_gcp/docs/models/function.md +++ b/packages/mistralai_gcp/docs/models/function.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | -| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md index ae387867..7ff7c070 100644 --- a/packages/mistralai_gcp/docs/models/jsonschema.md +++ b/packages/mistralai_gcp/docs/models/jsonschema.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ----------------------- | ----------------------- | ----------------------- | ----------------------- | | `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/metadata.md b/packages/mistralai_gcp/docs/models/metadata.md new file mode 100644 index 00000000..e655f580 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/metadata.md @@ -0,0 +1,7 @@ +# Metadata + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/mistralpromptmode.md b/packages/mistralai_gcp/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..7416e203 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/mistralpromptmode.md @@ -0,0 +1,8 @@ +# MistralPromptMode + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md index 822f86f8..fb661f72 100644 --- a/packages/mistralai_gcp/docs/models/tool.md +++ b/packages/mistralai_gcp/docs/models/tool.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md index 574be1ea..43e09050 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ 
b/packages/mistralai_gcp/docs/models/toolcall.md @@ -3,9 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.Metadata]](../models/metadata.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/mistralai_gcp/docs/models/toolchoice.md index 792ebcd6..373046bb 100644 --- a/packages/mistralai_gcp/docs/models/toolchoice.md +++ b/packages/mistralai_gcp/docs/models/toolchoice.md @@ -7,5 +7,5 @@ ToolChoice is either a ToolChoiceEnum or a ToolChoice | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usageinfo.md b/packages/mistralai_gcp/docs/models/usageinfo.md index 9f56a3ae..f5204ac9 100644 --- a/packages/mistralai_gcp/docs/models/usageinfo.md +++ b/packages/mistralai_gcp/docs/models/usageinfo.md @@ -3,8 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` 
| *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index 266bc815..a8fcb932 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -188,8 +188,8 @@ good-names=i, Run, _, e, - n, - id + id, + n # Good variable names regexes, separated by a comma. If names match any regex, # they will always be accepted @@ -458,7 +458,8 @@ disable=raw-checker-failed, relative-beyond-top-level, consider-using-with, wildcard-import, - unused-wildcard-import + unused-wildcard-import, + too-many-return-statements # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -659,4 +660,4 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 9763e417..e237cd24 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,18 +1,16 @@ [project] name = "mistralai-gcp" -version = "1.6.0" +version = "2.0.2" description = "Python Client SDK for the Mistral AI API in GCP." authors = [{ name = "Mistral" },] -readme = "README-PYPI.md" -requires-python = ">=3.9" +readme = "README.md" +requires-python = ">=3.9.2" dependencies = [ - "eval-type-backport >=0.2.0", "google-auth (>=2.31.0,<3.0.0)", + "httpcore >=1.0.9", "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", + "pydantic >=2.11.2", "requests (>=2.32.3,<3.0.0)", - "typing-inspection >=0.4.0", ] [tool.poetry] @@ -28,11 +26,10 @@ include = ["py.typed", "src/mistralai_gcp/py.typed"] in-project = true [tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" +mypy = "==1.15.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" [build-system] requires = ["poetry-core"] @@ -44,6 +41,8 @@ pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +explicit_package_bases = true +mypy_path = "src" [[tool.mypy.overrides]] module = "typing_inspect" diff --git a/packages/mistralai_gcp/scripts/prepare_readme.py b/packages/mistralai_gcp/scripts/prepare_readme.py deleted file mode 100644 index 825d9ded..00000000 --- a/packages/mistralai_gcp/scripts/prepare_readme.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import shutil - -try: - shutil.copyfile("README.md", "README-PYPI.md") -except Exception as e: - print("Failed to copy README.md to README-PYPI.md") - print(e) diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index f2f2cf2c..1ee7194c 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -2,6 +2,4 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py - poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index bb867b5b..f8088f4c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -3,10 +3,12 @@ from abc import ABC, abstractmethod import httpx from mistralai_gcp.httpclient import HttpClient +from mistralai_gcp.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union class HookContext: + config: SDKConfiguration base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None @@ -14,11 +16,13 @@ class HookContext: def __init__( self, + config: SDKConfiguration, base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.config = config self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes @@ -28,6 +32,7 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -38,6 +43,7 @@ def __init__(self, hook_ctx: HookContext): class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -48,6 +54,7 @@ def __init__(self, hook_ctx: HookContext): class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 11f38b63..1b685766 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "1.6.0" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai-gcp" +__version__: str = "2.0.2" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.634.2" +__user_agent__: str = "speakeasy-sdk/python 2.0.2 2.634.2 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index bb0aab96..5bf4fe12 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -218,12 +218,12 @@ def do_request( client = self.sdk_configuration.client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), 
request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -237,9 +237,7 @@ def do(): http_res = client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e @@ -257,7 +255,7 @@ def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -277,9 +275,7 @@ def do(): http_res = do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res @@ -294,12 +290,12 @@ async def do_request_async( client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + async def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -313,9 +309,7 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e @@ -333,7 +327,7 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -355,8 +349,6 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index dba369bf..e4932474 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -41,6 +41,7 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -66,6 +67,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
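
The new `prompt_mode` keyword documented here is the user-facing switch for the reasoning system prompt. A hedged usage sketch follows; the model id is an assumption (any reasoning-capable model), and the same keyword exists on `stream`, `complete`, and their async variants:

```python
# Illustrative use of the new prompt_mode flag.
res = client.chat.complete(
    model="magistral-small-2506",  # assumed reasoning-capable model id
    messages=[{"role": "user", "content": "Who is the best French painter?"}],
    prompt_mode="reasoning",  # open enum: Literal["reasoning"] | UnrecognizedStr
)
print(res.choices[0].message.content)
```
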
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -104,6 +106,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request( @@ -135,6 +138,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -209,6 +213,7 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -234,6 +239,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -272,6 +278,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request_async( @@ -303,6 +310,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -385,6 +393,7 @@ def complete( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -408,6 +417,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
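
Beyond `prompt_mode`, every `HookContext(...)` construction in this diff now receives `config=self.sdk_configuration`, so custom hooks can inspect the live SDK configuration. A sketch of a before-request hook using it; the import path and the `server_url` attribute are assumptions based on the generated package layout:

```python
# Sketch of a hook reading the SDKConfiguration newly attached to HookContext.
import httpx
from mistralai_gcp._hooks.types import BeforeRequestContext


class LogOperationHook:
    def before_request(
        self, hook_ctx: BeforeRequestContext, request: httpx.Request
    ) -> httpx.Request:
        # hook_ctx.config is the SDKConfiguration threaded through do_request().
        print(f"{hook_ctx.operation_id} -> {hook_ctx.config.server_url}")
        return request
```
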
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -448,6 +458,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request( @@ -479,6 +490,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], @@ -557,6 +569,7 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -580,6 +593,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -620,6 +634,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request_async( @@ -651,6 +666,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 84821c6a..cc2c0f9b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -105,6 +105,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], @@ -243,6 +244,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], @@ -381,6 +383,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], @@ -515,6 +518,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 1e426352..47b052cb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -2,7 +2,6 @@ # pyright: reportReturnType = false import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, 
runtime_checkable import httpx from typing import Any, Optional, Union @@ -116,21 +115,12 @@ def close_clients( pass if async_client is not None and not async_client_supplied: - is_async = False try: - asyncio.get_running_loop() - is_async = True + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. - if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: + try: asyncio.run(async_client.aclose()) - except Exception: - pass + except RuntimeError: + # best effort + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 752e70e6..6dacf2ee 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -1,122 +1,138 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, -) -from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceFinishReason, - ChatCompletionChoiceTypedDict, -) -from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, - ChatCompletionRequestStop, - ChatCompletionRequestStopTypedDict, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, -) -from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, -) -from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, -) -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, - FinishReason, -) -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .fimcompletionrequest import ( - FIMCompletionRequest, - FIMCompletionRequestStop, - FIMCompletionRequestStopTypedDict, - FIMCompletionRequestTypedDict, -) -from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict -from .fimcompletionstreamrequest import ( - FIMCompletionStreamRequest, - FIMCompletionStreamRequestStop, - FIMCompletionStreamRequestStopTypedDict, - FIMCompletionStreamRequestTypedDict, -) -from .function import Function, FunctionTypedDict -from .functioncall import ( - Arguments, - ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, -) -from .functionname import FunctionName, FunctionNameTypedDict -from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .imageurl import ImageURL, ImageURLTypedDict -from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - 
ImageURLChunkTypedDict, -) -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .responseformats import ResponseFormats -from .sdkerror import SDKError -from .security import Security, SecurityTypedDict -from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, -) -from .textchunk import TextChunk, TextChunkTypedDict, Type -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, -) -from .tooltypes import ToolTypes -from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, -) -from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, -) +from typing import TYPE_CHECKING +from importlib import import_module +if TYPE_CHECKING: + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageRole, + AssistantMessageTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict + from .deltamessage import ( + Content, + ContentTypedDict, + DeltaMessage, + DeltaMessageTypedDict, + ) + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from 
.httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ( + ReferenceChunk, + ReferenceChunkType, + ReferenceChunkTypedDict, + ) + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .sdkerror import SDKError + from .security import Security, SecurityTypedDict + from .systemmessage import ( + Role, + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict, Type + from .tool import Tool, ToolTypedDict + from .toolcall import Metadata, MetadataTypedDict, ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) __all__ = [ "Arguments", @@ -187,6 +203,9 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "Metadata", + "MetadataTypedDict", + "MistralPromptMode", "Prediction", "PredictionTypedDict", "ReferenceChunk", @@ -231,3 +250,146 @@ "ValidationError", "ValidationErrorTypedDict", ] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageRole": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessages": ".chatcompletionrequest", + "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "Messages": ".chatcompletionstreamrequest", + "MessagesTypedDict": ".chatcompletionstreamrequest", + "Stop": ".chatcompletionstreamrequest", + "StopTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": 
".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "FinishReason": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "Content": ".deltamessage", + "ContentTypedDict": ".deltamessage", + "DeltaMessage": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkImageURL": ".imageurlchunk", + "ImageURLChunkImageURLTypedDict": ".imageurlchunk", + "ImageURLChunkType": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": ".mistralpromptmode", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkType": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "SDKError": ".sdkerror", + "Security": ".security", + "SecurityTypedDict": ".security", + "Role": ".systemmessage", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "Type": ".textchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "Metadata": ".toolcall", + "MetadataTypedDict": ".toolcall", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageRole": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageRole": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + 
"LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " + ) + + try: + module = import_module(module_name, __package__) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 9147f566..794b8c80 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -56,7 +56,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index a0125c35..7fbce9b4 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai_gcp.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -96,6 +98,8 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class ChatCompletionRequest(BaseModel): @@ -142,6 +146,11 @@ class ChatCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -159,15 +168,23 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py index 0404a9d2..a7953eb1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class ChatCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class ChatCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class ChatCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class ChatCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index 656f1d58..e07c5a1d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai_gcp.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -91,6 +93,8 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" class ChatCompletionStreamRequest(BaseModel): @@ -136,6 +140,11 @@ class ChatCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -153,15 +162,23 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py index 8d779971..1be7dbdc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -38,7 +38,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index f9f0868b..1801ac76 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -46,7 +46,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 6dfb7373..9357f7a8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -110,7 +110,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py index a4d273a2..e1940b0a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class 
FIMCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class FIMCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class FIMCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class FIMCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 406749bb..0a6c82c2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -108,7 +108,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py index e7aa11f0..20d4ba77 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py @@ -32,7 +32,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py index 2529ce31..26914b2f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py @@ -40,7 +40,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py new file mode 100644 index 00000000..3f4de0fa --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
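Both `ChatCompletionResponse` and `FIMCompletionResponse` promote `created` and `choices` from optional-with-default to required. On the TypedDict side this only changes what type checkers accept; a compact illustration with hypothetical names:

```python
# Illustration of the NotRequired -> required flip (hypothetical names).
from typing_extensions import TypedDict, NotRequired

class BeforeResponse(TypedDict):
    model: str
    created: NotRequired[int]    # callers could omit this key

class AfterResponse(TypedDict):
    model: str
    created: int                 # omitting it is now a type error

ok: AfterResponse = {"model": "m", "created": 123}
# bad: AfterResponse = {"model": "m"}   # flagged by mypy/pyright
```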
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index 5a24f644..34193895 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -37,7 +37,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index ecbac8d6..bcf40b85 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -3,18 +3,34 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_gcp.utils import validate_open_enum +from pydantic import model_serializer from pydantic.functional_validators import PlainValidator from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict +class MetadataTypedDict(TypedDict): + pass + + +class Metadata(BaseModel): + pass + + class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] index: NotRequired[int] + metadata: NotRequired[Nullable[MetadataTypedDict]] class ToolCall(BaseModel): @@ -27,3 +43,35 @@ class ToolCall(BaseModel): ) index: Optional[int] = 0 + + metadata: OptionalNullable[Metadata] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["id", "type", "index", "metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index 886b6ff1..bd187b32 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -51,7 +51,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py index 9de6af7e..59f36158 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ 
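Alongside the new open `MistralPromptMode` alias, the GCP `ToolCall` gains an empty `Metadata` model behind an `OptionalNullable` field whose wrap-serializer distinguishes unset from explicit null. A sketch of that tri-state, derived from the serializer logic above; argument values are placeholders:

```python
# Sketch: absent vs. explicit-null metadata on the GCP ToolCall.
from mistralai_gcp import models

call = models.ToolCall(function=models.FunctionCall(name="f", arguments="{}"))
print(call.model_dump(by_alias=True))        # no "metadata" key: field is UNSET

call_null = models.ToolCall(
    function=models.FunctionCall(name="f", arguments="{}"),
    metadata=None,                           # explicitly set to null
)
print(call_null.model_dump(by_alias=True))   # "metadata": None survives, because
                                             # the field is in __pydantic_fields_set__
```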
b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py @@ -1,19 +1,82 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing_extensions import TypedDict +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class UsageInfoTypedDict(TypedDict): - prompt_tokens: int - completion_tokens: int - total_tokens: int + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] class UsageInfo(BaseModel): - prompt_tokens: int + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - completion_tokens: int + prompt_tokens: Optional[int] = 0 - total_tokens: int + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + nullable_fields = ["prompt_audio_seconds"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index 287bb1b4..1f9a1630 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -39,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index c373d27d..cf85c47e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
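`UsageInfo` becomes forward-compatible: the token counters default to 0, a nullable `prompt_audio_seconds` arrives, and `extra="allow"` plus the `additional_properties` accessor retain unknown keys instead of dropping them. A sketch with an illustrative payload (not a documented response shape):

```python
# Sketch: unknown usage keys are kept via __pydantic_extra__.
from mistralai_gcp import models

usage = models.UsageInfo.model_validate(
    {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15,
     "some_future_counter": 42}
)
print(usage.total_tokens)            # 15
print(usage.additional_properties)   # {"some_future_counter": 42}
print(usage.prompt_audio_seconds)    # UNSET unless the API sends it
```

The trailing `for k, v in serialized.items()` loop in the generated serializer is what re-emits those extras on `model_dump`.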
DO NOT EDIT.""" -from ._hooks import SDKHooks from ._version import ( __gen_version__, __openapi_doc_version__, @@ -42,9 +41,6 @@ class SDKConfiguration: retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None - def __post_init__(self): - self._hooks = SDKHooks() - def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} @@ -55,6 +51,3 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - - def get_hooks(self) -> SDKHooks: - return self._hooks diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py index a6187efa..231c2e37 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py @@ -2,7 +2,7 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +35,5 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt = NewType("UnrecognizedInt", int) -UnrecognizedStr = NewType("UnrecognizedStr", str) +UnrecognizedInt: TypeAlias = int +UnrecognizedStr: TypeAlias = str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 3cded8fe..dd4aa4b3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -1,50 +1,55 @@ """Code generated by Speakeasy (https://speakeasy.com). 
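Two cleanups land here: the GCP `SDKConfiguration` no longer constructs `SDKHooks` itself, and `UnrecognizedStr`/`UnrecognizedInt` turn from `NewType`s into plain type aliases. The alias change is what makes open enums like `MistralPromptMode` ergonomic, since a bare string now type-checks without a wrapper call. A sketch of the difference:

```python
# Why the alias helps: with NewType, a bare str is not assignable to
# UnrecognizedStr under a type checker; with TypeAlias it is.
from typing import Literal, NewType, Union
from typing_extensions import TypeAlias

UnrecognizedStrOld = NewType("UnrecognizedStrOld", str)
UnrecognizedStrNew: TypeAlias = str

ModeOld = Union[Literal["reasoning"], UnrecognizedStrOld]
ModeNew = Union[Literal["reasoning"], UnrecognizedStrNew]

old: ModeOld = UnrecognizedStrOld("future-mode")  # wrapper call required
new: ModeNew = "future-mode"                      # plain string now type-checks
```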
DO NOT EDIT.""" -from .annotations import get_discriminator -from .enums import OpenEnumMeta -from .headers import get_headers, get_response_headers -from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, -) -from .queryparams import get_query_params -from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig -from .requestbodies import serialize_request_body, SerializedRequestBody -from .security import get_security -from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, -) -from .url import generate_url, template_url, remove_suffix -from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, -) -from .logger import Logger, get_body_content, get_default_logger +from typing import TYPE_CHECKING +from importlib import import_module + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security + from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger __all__ = [ "BackoffStrategy", @@ -55,6 +60,7 @@ "get_body_content", "get_default_logger", "get_discriminator", + "parse_datetime", "get_global_from_env", "get_headers", "get_pydantic_model", @@ -97,3 +103,82 @@ "validate_open_enum", "cast_partial", ] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + "OpenEnumMeta": ".enums", + 
"PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": ".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "validate_open_enum": ".serializers", + "cast_partial": ".values", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " + ) + + try: + module = import_module(module_name, __package__) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py new file mode 100644 index 00000000..a6c52cd6 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +import sys + + +def parse_datetime(datetime_string: str) -> datetime: + """ + Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. + Python versions 3.11 and later support parsing RFC 3339 directly with + datetime.fromisoformat(), but for earlier versions, this function + encapsulates the necessary extra logic. + """ + # Python 3.11 and later can parse RFC 3339 directly + if sys.version_info >= (3, 11): + return datetime.fromisoformat(datetime_string) + + # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, + # so fix that upfront. + if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py index c650b10c..c3bc13cf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py @@ -1,34 +1,74 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" import enum - +import sys class OpenEnumMeta(enum.EnumMeta): - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. 
Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. + # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py index 0472aba8..e873495f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py @@ -86,11 +86,39 @@ def _populate_form( return form +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + def serialize_multipart_form( media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: form: Dict[str, Any] = {} - files: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] if not isinstance(request, BaseModel): raise TypeError("invalid request body type") @@ -112,39 +140,32 @@ def serialize_multipart_form( f_name = field.alias if field.alias else 
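`OpenEnumMeta.__call__` now forks on the interpreter version because, per the comment above, `enum.EnumMeta.__call__` grew the `boundary` keyword and positional `*values` in Python 3.11; forwarding those unconditionally would break 3.10 and earlier. The open-enum behavior itself is unchanged: a failed member lookup returns the raw value instead of raising. A sketch of that fallback in isolation:

```python
# Sketch: what OpenEnumMeta's ValueError fallback buys an open enum.
import enum

class Color(enum.Enum):
    RED = "red"

def open_lookup(cls, value):
    # Mirrors the metaclass behavior: try the enum, fall back to the raw value.
    try:
        return cls(value)
    except ValueError:
        return value

print(open_lookup(Color, "red"))    # Color.RED
print(open_lookup(Color, "teal"))   # "teal", unrecognized but passed through
```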
name if field_metadata.file: - file_fields: Dict[str, FieldInfo] = val.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] + if isinstance(val, List): + # Handle array of files + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type = _extract_file_properties(file_obj) - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue + if content_type is not None: + files.append((f_name + "[]", (file_name, content, content_type))) + else: + files.append((f_name + "[]", (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) - if file_metadata.content: - content = getattr(val, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(val, file_field_name, None) + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) else: - file_name = getattr(val, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - if content_type is not None: - files[f_name] = (file_name, content, content_type) - else: - files[f_name] = (file_name, content) + files.append((f_name, (file_name, content))) elif field_metadata.json: - files[f_name] = ( + files.append((f_name, ( None, marshal_json(val, request_field_types[name]), "application/json", - ) + ))) else: if isinstance(val, List): values = [] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index baa41fbd..76e44d71 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -1,13 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from decimal import Decimal +import functools import json -from typing import Any, Dict, List, Union, get_args -import httpx +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions from typing_extensions import get_origin + +import httpx from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset @@ -185,6 +188,13 @@ def is_nullable(field): return False +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. + """ + return any(obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union")) + + def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) @@ -217,3 +227,22 @@ def _contains_pydantic_model(data: Any) -> bool: return any(_contains_pydantic_model(value) for value in data.values()) return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. 
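`serialize_multipart_form` switches its files container from a dict to a list of `(field_name, file_tuple)` pairs and appends `[]` to the field name for arrays of files. The list shape is what allows repeated field names at all: httpx accepts a sequence of such pairs, while a dict can hold only one entry per name. A sketch with illustrative names:

```python
# Sketch: why files moved from a dict to a list of tuples. A dict would
# silently keep only the last file per field name; a sequence keeps them all.
import httpx

files = [
    ("documents[]", ("a.txt", b"first file")),
    ("documents[]", ("b.txt", b"second file")),   # same field name, both sent
]
# httpx.post("https://example.invalid/upload", files=files)
```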
+ Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result + diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 5937a745..e5fd40e6 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.3" +__version__: str = "1.9.6" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.3 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.6 2.634.2 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py index 67061b7e..f5f5b446 100644 --- a/src/mistralai/accesses.py +++ b/src/mistralai/accesses.py @@ -9,6 +9,8 @@ class Accesses(BaseSDK): + r"""(beta) Libraries API - manage access to a library.""" + def list( self, *, diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py index 3408d943..5201022e 100644 --- a/src/mistralai/beta.py +++ b/src/mistralai/beta.py @@ -13,7 +13,7 @@ class Beta(BaseSDK): agents: MistralAgents r"""(beta) Agents API""" libraries: Libraries - r"""(beta) Libraries API for indexing documents to enhance agent capabilities.""" + r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" def __init__(self, sdk_config: SDKConfiguration) -> None: BaseSDK.__init__(self, sdk_config) diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py index e43d3faf..c28758d2 100644 --- a/src/mistralai/documents.py +++ b/src/mistralai/documents.py @@ -9,6 +9,8 @@ class Documents(BaseSDK): + r"""(beta) Libraries API - manage documents in a library.""" + def list( self, *, diff --git a/src/mistralai/libraries.py b/src/mistralai/libraries.py index 45bf0397..852f6997 100644 --- a/src/mistralai/libraries.py +++ b/src/mistralai/libraries.py @@ -12,10 +12,12 @@ class Libraries(BaseSDK): - r"""(beta) Libraries API for indexing documents to enhance agent capabilities.""" + r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" documents: Documents + r"""(beta) Libraries API - manage documents in a library.""" accesses: Accesses + r"""(beta) Libraries API - manage access to a library.""" def __init__(self, sdk_config: SDKConfiguration) -> None: BaseSDK.__init__(self, sdk_config) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 2039c2b6..d24492d6 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -777,7 +777,7 @@ ) from .timestampgranularity import TimestampGranularity from .tool import Tool, ToolTypedDict - from .toolcall import Metadata, MetadataTypedDict, ToolCall, ToolCallTypedDict + from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum from .toolexecutiondeltaevent import ( @@ -1383,8 +1383,6 @@ "MessageOutputEventTypedDict", "Messages", "MessagesTypedDict", - "Metadata", - "MetadataTypedDict", "MetricOut", "MetricOutTypedDict", "MistralPromptMode", @@ -2172,8 +2170,6 @@ "TimestampGranularity": ".timestampgranularity", "Tool": ".tool", "ToolTypedDict": ".tool", - 
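The `typing_inspection` dependency is replaced by a local `is_union` that identity-compares an origin object against the `Union` objects from both `typing` and `typing_extensions`, with the lookup memoized via `functools.cache`. Note it expects the result of `get_origin`, not the annotation itself; a condensed sketch:

```python
# Condensed sketch of how the new local is_union is used: it checks the
# *origin* of a type. (The real helper also checks typing_extensions.Union.)
from typing import Optional, Union, get_origin

def is_union(obj: object) -> bool:
    return obj is Union

print(is_union(get_origin(Union[int, str])))   # True
print(is_union(get_origin(Optional[int])))     # True, Optional is a Union
print(is_union(get_origin(list)))              # False
```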
"Metadata": ".toolcall", - "MetadataTypedDict": ".toolcall", "ToolCall": ".toolcall", "ToolCallTypedDict": ".toolcall", "ToolChoice": ".toolchoice", diff --git a/src/mistralai/models/messageoutputcontentchunks.py b/src/mistralai/models/messageoutputcontentchunks.py index e83fb3a9..136a7608 100644 --- a/src/mistralai/models/messageoutputcontentchunks.py +++ b/src/mistralai/models/messageoutputcontentchunks.py @@ -4,6 +4,7 @@ from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -16,6 +17,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, + ThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -25,6 +27,11 @@ MessageOutputContentChunks = TypeAliasType( "MessageOutputContentChunks", Union[ - TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ThinkChunk, + ToolFileChunk, + ToolReferenceChunk, ], ) diff --git a/src/mistralai/models/outputcontentchunks.py b/src/mistralai/models/outputcontentchunks.py index 6b7e39ea..ad0c087e 100644 --- a/src/mistralai/models/outputcontentchunks.py +++ b/src/mistralai/models/outputcontentchunks.py @@ -4,6 +4,7 @@ from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -16,6 +17,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, + ThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -25,6 +27,11 @@ OutputContentChunks = TypeAliasType( "OutputContentChunks", Union[ - TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ThinkChunk, + ToolFileChunk, + ToolReferenceChunk, ], ) diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 7d3a3c6b..92dbb4a9 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -3,28 +3,18 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.types import BaseModel from mistralai.utils import validate_open_enum -from pydantic import model_serializer from pydantic.functional_validators import PlainValidator from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict -class MetadataTypedDict(TypedDict): - pass - - -class Metadata(BaseModel): - pass - - class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] index: NotRequired[int] - metadata: NotRequired[Nullable[MetadataTypedDict]] class ToolCall(BaseModel): @@ -37,35 +27,3 @@ class ToolCall(BaseModel): ) index: Optional[int] = 0 - - metadata: OptionalNullable[Metadata] = UNSET - - 
@model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["id", "type", "index", "metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m
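Finally, the main package walks back what the GCP package just added: `Metadata` and the `metadata` field are removed from `ToolCall` (and from the models `__init__` exports), along with the custom wrap-serializer that existed only to handle that one nullable field. With no nullable fields left, Pydantic's default serialization suffices. A sketch, with placeholder argument values:

```python
# Sketch: with metadata gone, plain model_dump is all ToolCall needs.
from mistralai import models

call = models.ToolCall(
    function=models.FunctionCall(name="get_weather", arguments="{}")
)
print(call.model_dump(by_alias=True))   # default serialization, no custom hook
```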