diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml index c95f2c1b..0bbf7126 100644 --- a/.github/workflows/lint_custom_code.yaml +++ b/.github/workflows/lint_custom_code.yaml @@ -27,7 +27,7 @@ jobs: - name: Install dependencies run: | touch README-PYPI.md - poetry install + poetry install --all-extras # The init, sdkhooks.py and types.py files in the _hooks folders are generated by Speakeasy hence the exclusion - name: Run all linters diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index e55ca08d..7d8eb792 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -28,12 +28,6 @@ jobs: - name: Install Poetry uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 - - name: Build and install client - run: | - touch README-PYPI.md # Create this file since the client is not built from Speakeasy - poetry build - python3 -m pip install dist/mistralai-*.whl - - name: Set VERSION run: | VERSION=$(echo ${{ matrix.python-version }} | tr -d .) @@ -43,20 +37,27 @@ jobs: run: | echo "MISTRAL_API_KEY=${{ secrets[format('CI_MISTRAL_API_KEY_PYTHON_{0}', env.VERSION)] }}" >> $GITHUB_ENV - - name: Run the example scripts + - name: Build the package + run: | + touch README-PYPI.md # Create this file since the client is not built by Speakeasy + poetry build + + - name: For python 3.9, install the client and run examples without extra dependencies. + if: matrix.python-version == '3.9' + run: | + PACKAGE="dist/$(ls dist | grep whl | head -n 1)" + python3 -m pip install "$PACKAGE" + ./scripts/run_examples.sh --no-extra-dep + env: + MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} + MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} + + - name: For python 3.10+, install client with extras and run all examples. 
+ if: matrix.python-version != '3.9' run: | - failed=0 - for file in examples/*.py; do - if [ -f "$file" ] && [ "$file" != "examples/chatbot_with_streaming.py" ]; then - echo "Running $file" - # Do not fail if the script fails, but save it in the failed variable - python3 "$file" > /dev/null || failed=1 - fi - done - # If one of the example script failed then exit - if [ $failed -ne 0 ]; then - exit 1 - fi + PACKAGE="dist/$(ls dist | grep whl | head -n 1)[agents]" + python3 -m pip install "$PACKAGE" + ./scripts/run_examples.sh env: MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 1a9287fc..80be7b20 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 3589e9f1ea5775264c5c8e0887b4ea0e + docChecksum: e9c447db719018a5721988252c09c2dc docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.7.1 - configChecksum: d35541d61057b11258d7d56bbc5c5260 + releaseVersion: 1.8.0 + configChecksum: 1f7adfac0b677cdca4c073a11cbcef02 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -45,6 +45,32 @@ generatedFiles: - .python-version - .vscode/settings.json - USAGE.md + - docs/models/agent.md + - docs/models/agentconversation.md + - docs/models/agentconversationobject.md + - docs/models/agentcreationrequest.md + - docs/models/agentcreationrequesttools.md + - docs/models/agenthandoffdoneevent.md + - docs/models/agenthandoffdoneeventtype.md + - docs/models/agenthandoffentry.md + - docs/models/agenthandoffentryobject.md + - docs/models/agenthandoffentrytype.md + - docs/models/agenthandoffstartedevent.md + - docs/models/agenthandoffstartedeventtype.md + - docs/models/agentobject.md + - docs/models/agentsapiv1agentsgetrequest.md + - 
docs/models/agentsapiv1agentslistrequest.md + - docs/models/agentsapiv1agentsupdaterequest.md + - docs/models/agentsapiv1agentsupdateversionrequest.md + - docs/models/agentsapiv1conversationsappendrequest.md + - docs/models/agentsapiv1conversationsappendstreamrequest.md + - docs/models/agentsapiv1conversationsgetrequest.md + - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md + - docs/models/agentsapiv1conversationshistoryrequest.md + - docs/models/agentsapiv1conversationslistrequest.md + - docs/models/agentsapiv1conversationsmessagesrequest.md + - docs/models/agentsapiv1conversationsrestartrequest.md + - docs/models/agentsapiv1conversationsrestartstreamrequest.md - docs/models/agentscompletionrequest.md - docs/models/agentscompletionrequestmessages.md - docs/models/agentscompletionrequeststop.md @@ -53,6 +79,9 @@ generatedFiles: - docs/models/agentscompletionstreamrequestmessages.md - docs/models/agentscompletionstreamrequeststop.md - docs/models/agentscompletionstreamrequesttoolchoice.md + - docs/models/agenttools.md + - docs/models/agentupdaterequest.md + - docs/models/agentupdaterequesttools.md - docs/models/apiendpoint.md - docs/models/archiveftmodelout.md - docs/models/archiveftmodeloutobject.md @@ -68,6 +97,7 @@ generatedFiles: - docs/models/batchjobsout.md - docs/models/batchjobsoutobject.md - docs/models/batchjobstatus.md + - docs/models/builtinconnectors.md - docs/models/chatclassificationrequest.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionrequest.md @@ -101,6 +131,10 @@ generatedFiles: - docs/models/classifiertargetout.md - docs/models/classifiertrainingparameters.md - docs/models/classifiertrainingparametersin.md + - docs/models/codeinterpretertool.md + - docs/models/codeinterpretertooltype.md + - docs/models/completionargs.md + - docs/models/completionargsstop.md - docs/models/completionchunk.md - docs/models/completiondetailedjobout.md - docs/models/completiondetailedjoboutintegrations.md @@ -112,24 +146,50 
@@ generatedFiles: - docs/models/completionftmodelout.md - docs/models/completionftmodeloutobject.md - docs/models/completionjobout.md + - docs/models/completionjoboutobject.md - docs/models/completionresponsestreamchoice.md - docs/models/completionresponsestreamchoicefinishreason.md - docs/models/completiontrainingparameters.md - docs/models/completiontrainingparametersin.md - docs/models/content.md - docs/models/contentchunk.md + - docs/models/conversationappendrequest.md + - docs/models/conversationappendrequesthandoffexecution.md + - docs/models/conversationappendstreamrequest.md + - docs/models/conversationappendstreamrequesthandoffexecution.md + - docs/models/conversationevents.md + - docs/models/conversationeventsdata.md + - docs/models/conversationhistory.md + - docs/models/conversationhistoryobject.md + - docs/models/conversationinputs.md + - docs/models/conversationmessages.md + - docs/models/conversationmessagesobject.md + - docs/models/conversationrequest.md + - docs/models/conversationresponse.md + - docs/models/conversationresponseobject.md + - docs/models/conversationrestartrequest.md + - docs/models/conversationrestartrequesthandoffexecution.md + - docs/models/conversationrestartstreamrequest.md + - docs/models/conversationrestartstreamrequesthandoffexecution.md + - docs/models/conversationstreamrequest.md + - docs/models/conversationstreamrequesthandoffexecution.md + - docs/models/conversationstreamrequesttools.md + - docs/models/conversationusageinfo.md - docs/models/data.md - docs/models/deletefileout.md - docs/models/deletemodelout.md - docs/models/deletemodelv1modelsmodeliddeleterequest.md - docs/models/deltamessage.md - docs/models/document.md + - docs/models/documentlibrarytool.md + - docs/models/documentlibrarytooltype.md - docs/models/documenturlchunk.md - docs/models/documenturlchunktype.md - docs/models/embeddingrequest.md - docs/models/embeddingrequestinputs.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md + - 
docs/models/entries.md - docs/models/eventout.md - docs/models/file.md - docs/models/filepurpose.md @@ -154,17 +214,32 @@ generatedFiles: - docs/models/ftmodelcardtype.md - docs/models/function.md - docs/models/functioncall.md + - docs/models/functioncallentry.md + - docs/models/functioncallentryarguments.md + - docs/models/functioncallentryobject.md + - docs/models/functioncallentrytype.md + - docs/models/functioncallevent.md + - docs/models/functioncalleventtype.md - docs/models/functionname.md + - docs/models/functionresultentry.md + - docs/models/functionresultentryobject.md + - docs/models/functionresultentrytype.md + - docs/models/functiontool.md + - docs/models/functiontooltype.md - docs/models/githubrepositoryin.md - docs/models/githubrepositoryintype.md - docs/models/githubrepositoryout.md - docs/models/githubrepositoryouttype.md + - docs/models/handoffexecution.md - docs/models/httpvalidationerror.md - docs/models/hyperparameters.md + - docs/models/imagegenerationtool.md + - docs/models/imagegenerationtooltype.md - docs/models/imageurl.md - docs/models/imageurlchunk.md - docs/models/imageurlchunkimageurl.md - docs/models/imageurlchunktype.md + - docs/models/inputentries.md - docs/models/inputs.md - docs/models/instructrequest.md - docs/models/instructrequestinputs.md @@ -199,9 +274,28 @@ generatedFiles: - docs/models/legacyjobmetadataoutobject.md - docs/models/listfilesout.md - docs/models/loc.md + - docs/models/messageentries.md + - docs/models/messageinputcontentchunks.md + - docs/models/messageinputentry.md + - docs/models/messageinputentrycontent.md + - docs/models/messageinputentryrole.md + - docs/models/messageinputentrytype.md + - docs/models/messageoutputcontentchunks.md + - docs/models/messageoutputentry.md + - docs/models/messageoutputentrycontent.md + - docs/models/messageoutputentryobject.md + - docs/models/messageoutputentryrole.md + - docs/models/messageoutputentrytype.md + - docs/models/messageoutputevent.md + - 
docs/models/messageoutputeventcontent.md + - docs/models/messageoutputeventrole.md + - docs/models/messageoutputeventtype.md - docs/models/messages.md - docs/models/metricout.md - docs/models/modelcapabilities.md + - docs/models/modelconversation.md + - docs/models/modelconversationobject.md + - docs/models/modelconversationtools.md - docs/models/modellist.md - docs/models/modeltype.md - docs/models/moderationobject.md @@ -214,14 +308,23 @@ generatedFiles: - docs/models/ocrresponse.md - docs/models/ocrusageinfo.md - docs/models/one.md + - docs/models/outputcontentchunks.md + - docs/models/outputs.md - docs/models/prediction.md - docs/models/queryparamstatus.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/repositories.md - docs/models/response1.md + - docs/models/responsebody.md + - docs/models/responsedoneevent.md + - docs/models/responsedoneeventtype.md + - docs/models/responseerrorevent.md + - docs/models/responseerroreventtype.md - docs/models/responseformat.md - docs/models/responseformats.md + - docs/models/responsestartedevent.md + - docs/models/responsestartedeventtype.md - docs/models/retrievefileout.md - docs/models/retrievemodelv1modelsmodelidgetrequest.md - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md @@ -229,6 +332,7 @@ generatedFiles: - docs/models/sampletype.md - docs/models/security.md - docs/models/source.md + - docs/models/ssetypes.md - docs/models/status.md - docs/models/stop.md - docs/models/systemmessage.md @@ -239,9 +343,21 @@ generatedFiles: - docs/models/toolcall.md - docs/models/toolchoice.md - docs/models/toolchoiceenum.md + - docs/models/toolexecutiondoneevent.md + - docs/models/toolexecutiondoneeventtype.md + - docs/models/toolexecutionentry.md + - docs/models/toolexecutionentryobject.md + - docs/models/toolexecutionentrytype.md + - docs/models/toolexecutionstartedevent.md + - docs/models/toolexecutionstartedeventtype.md + - docs/models/toolfilechunk.md + - 
docs/models/toolfilechunktype.md - docs/models/toolmessage.md - docs/models/toolmessagecontent.md - docs/models/toolmessagerole.md + - docs/models/toolreferencechunk.md + - docs/models/toolreferencechunktype.md + - docs/models/tools.md - docs/models/tooltypes.md - docs/models/trainingfile.md - docs/models/two.md @@ -260,16 +376,23 @@ generatedFiles: - docs/models/wandbintegrationout.md - docs/models/wandbintegrationouttype.md - docs/models/wandbintegrationtype.md + - docs/models/websearchpremiumtool.md + - docs/models/websearchpremiumtooltype.md + - docs/models/websearchtool.md + - docs/models/websearchtooltype.md - docs/sdks/agents/README.md - docs/sdks/batch/README.md + - docs/sdks/beta/README.md - docs/sdks/chat/README.md - docs/sdks/classifiers/README.md + - docs/sdks/conversations/README.md - docs/sdks/embeddings/README.md - docs/sdks/files/README.md - docs/sdks/fim/README.md - docs/sdks/finetuning/README.md - docs/sdks/jobs/README.md - docs/sdks/mistral/README.md + - docs/sdks/mistralagents/README.md - docs/sdks/mistraljobs/README.md - docs/sdks/models/README.md - docs/sdks/ocr/README.md @@ -286,18 +409,40 @@ generatedFiles: - src/mistralai/agents.py - src/mistralai/basesdk.py - src/mistralai/batch.py + - src/mistralai/beta.py - src/mistralai/chat.py - src/mistralai/classifiers.py + - src/mistralai/conversations.py - src/mistralai/embeddings.py - src/mistralai/files.py - src/mistralai/fim.py - src/mistralai/fine_tuning.py - src/mistralai/httpclient.py - src/mistralai/jobs.py + - src/mistralai/mistral_agents.py - src/mistralai/mistral_jobs.py - src/mistralai/models/__init__.py + - src/mistralai/models/agent.py + - src/mistralai/models/agentconversation.py + - src/mistralai/models/agentcreationrequest.py + - src/mistralai/models/agenthandoffdoneevent.py + - src/mistralai/models/agenthandoffentry.py + - src/mistralai/models/agenthandoffstartedevent.py + - src/mistralai/models/agents_api_v1_agents_getop.py + - src/mistralai/models/agents_api_v1_agents_listop.py + 
- src/mistralai/models/agents_api_v1_agents_update_versionop.py + - src/mistralai/models/agents_api_v1_agents_updateop.py + - src/mistralai/models/agents_api_v1_conversations_append_streamop.py + - src/mistralai/models/agents_api_v1_conversations_appendop.py + - src/mistralai/models/agents_api_v1_conversations_getop.py + - src/mistralai/models/agents_api_v1_conversations_historyop.py + - src/mistralai/models/agents_api_v1_conversations_listop.py + - src/mistralai/models/agents_api_v1_conversations_messagesop.py + - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py + - src/mistralai/models/agents_api_v1_conversations_restartop.py - src/mistralai/models/agentscompletionrequest.py - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/agentupdaterequest.py - src/mistralai/models/apiendpoint.py - src/mistralai/models/archiveftmodelout.py - src/mistralai/models/assistantmessage.py @@ -307,6 +452,7 @@ generatedFiles: - src/mistralai/models/batchjobout.py - src/mistralai/models/batchjobsout.py - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/builtinconnectors.py - src/mistralai/models/chatclassificationrequest.py - src/mistralai/models/chatcompletionchoice.py - src/mistralai/models/chatcompletionrequest.py @@ -324,6 +470,9 @@ generatedFiles: - src/mistralai/models/classifiertargetout.py - src/mistralai/models/classifiertrainingparameters.py - src/mistralai/models/classifiertrainingparametersin.py + - src/mistralai/models/codeinterpretertool.py + - src/mistralai/models/completionargs.py + - src/mistralai/models/completionargsstop.py - src/mistralai/models/completionchunk.py - src/mistralai/models/completiondetailedjobout.py - src/mistralai/models/completionevent.py @@ -333,10 +482,23 @@ generatedFiles: - src/mistralai/models/completiontrainingparameters.py - src/mistralai/models/completiontrainingparametersin.py - src/mistralai/models/contentchunk.py + - src/mistralai/models/conversationappendrequest.py + - 
src/mistralai/models/conversationappendstreamrequest.py + - src/mistralai/models/conversationevents.py + - src/mistralai/models/conversationhistory.py + - src/mistralai/models/conversationinputs.py + - src/mistralai/models/conversationmessages.py + - src/mistralai/models/conversationrequest.py + - src/mistralai/models/conversationresponse.py + - src/mistralai/models/conversationrestartrequest.py + - src/mistralai/models/conversationrestartstreamrequest.py + - src/mistralai/models/conversationstreamrequest.py + - src/mistralai/models/conversationusageinfo.py - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py - src/mistralai/models/deletefileout.py - src/mistralai/models/deletemodelout.py - src/mistralai/models/deltamessage.py + - src/mistralai/models/documentlibrarytool.py - src/mistralai/models/documenturlchunk.py - src/mistralai/models/embeddingrequest.py - src/mistralai/models/embeddingresponse.py @@ -360,12 +522,19 @@ generatedFiles: - src/mistralai/models/ftmodelcard.py - src/mistralai/models/function.py - src/mistralai/models/functioncall.py + - src/mistralai/models/functioncallentry.py + - src/mistralai/models/functioncallentryarguments.py + - src/mistralai/models/functioncallevent.py - src/mistralai/models/functionname.py + - src/mistralai/models/functionresultentry.py + - src/mistralai/models/functiontool.py - src/mistralai/models/githubrepositoryin.py - src/mistralai/models/githubrepositoryout.py - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/imagegenerationtool.py - src/mistralai/models/imageurl.py - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/inputentries.py - src/mistralai/models/inputs.py - src/mistralai/models/instructrequest.py - src/mistralai/models/jobin.py @@ -385,8 +554,15 @@ generatedFiles: - src/mistralai/models/jsonschema.py - src/mistralai/models/legacyjobmetadataout.py - src/mistralai/models/listfilesout.py + - src/mistralai/models/messageentries.py + - 
src/mistralai/models/messageinputcontentchunks.py + - src/mistralai/models/messageinputentry.py + - src/mistralai/models/messageoutputcontentchunks.py + - src/mistralai/models/messageoutputentry.py + - src/mistralai/models/messageoutputevent.py - src/mistralai/models/metricout.py - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/modelconversation.py - src/mistralai/models/modellist.py - src/mistralai/models/moderationobject.py - src/mistralai/models/moderationresponse.py @@ -396,23 +572,33 @@ generatedFiles: - src/mistralai/models/ocrrequest.py - src/mistralai/models/ocrresponse.py - src/mistralai/models/ocrusageinfo.py + - src/mistralai/models/outputcontentchunks.py - src/mistralai/models/prediction.py - src/mistralai/models/referencechunk.py + - src/mistralai/models/responsedoneevent.py + - src/mistralai/models/responseerrorevent.py - src/mistralai/models/responseformat.py - src/mistralai/models/responseformats.py + - src/mistralai/models/responsestartedevent.py - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py - src/mistralai/models/retrievefileout.py - src/mistralai/models/sampletype.py - src/mistralai/models/sdkerror.py - src/mistralai/models/security.py - src/mistralai/models/source.py + - src/mistralai/models/ssetypes.py - src/mistralai/models/systemmessage.py - src/mistralai/models/textchunk.py - src/mistralai/models/tool.py - src/mistralai/models/toolcall.py - src/mistralai/models/toolchoice.py - src/mistralai/models/toolchoiceenum.py + - src/mistralai/models/toolexecutiondoneevent.py + - src/mistralai/models/toolexecutionentry.py + - src/mistralai/models/toolexecutionstartedevent.py + - src/mistralai/models/toolfilechunk.py - src/mistralai/models/toolmessage.py + - src/mistralai/models/toolreferencechunk.py - src/mistralai/models/tooltypes.py - src/mistralai/models/trainingfile.py - src/mistralai/models/unarchiveftmodelout.py @@ -423,6 +609,8 @@ generatedFiles: - src/mistralai/models/validationerror.py - 
src/mistralai/models/wandbintegration.py - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models/websearchpremiumtool.py + - src/mistralai/models/websearchtool.py - src/mistralai/models_.py - src/mistralai/ocr.py - src/mistralai/py.typed @@ -735,5 +923,160 @@ examples: application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "CX-9", "results": [{"key": {"scores": {"key": 4386.53, "key1": 2974.85}}, "key1": {"scores": {"key": 7100.52, "key1": 480.47}}}]} "422": application/json: {} + agents_api_v1_conversations_start: + speakeasy-default-agents-api-v1-conversations-start: + requestBody: + application/json: {"inputs": "", "stream": false} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}, {"object": "entry", "type": "message.output", "role": "assistant", "content": [{"type": "tool_reference", "tool": "web_search_premium", "title": ""}, {"document_url": "https://unrealistic-fund.org/", "type": "document_url"}]}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_list: + speakeasy-default-agents-api-v1-conversations-list: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: [{"object": "conversation", "id": "", "created_at": "2025-01-13T10:26:00.433Z", "updated_at": "2023-07-14T18:23:27.528Z", "agent_id": ""}, {"object": "conversation", "id": "", "created_at": "2023-06-17T12:14:27.999Z", "updated_at": "2024-11-27T13:02:27.296Z", "model": "LeBaron"}, {"object": "conversation", "id": "", "created_at": "2025-02-26T06:14:46.641Z", "updated_at": "2023-04-05T09:49:38.010Z", "model": "A8"}] + "422": + application/json: {} + agents_api_v1_conversations_get: + speakeasy-default-agents-api-v1-conversations-get: + parameters: 
+ path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation", "id": "", "created_at": "2024-09-04T11:33:52.011Z", "updated_at": "2024-08-19T11:11:04.610Z", "agent_id": ""} + "422": + application/json: {} + agents_api_v1_conversations_append: + speakeasy-default-agents-api-v1-conversations-append: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server"} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}, {"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_history: + speakeasy-default-agents-api-v1-conversations-history: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.history", "conversation_id": "", "entries": [{"object": "entry", "type": "message.output", "role": "assistant", "content": [{"type": "tool_file", "tool": "web_search", "file_id": ""}]}]} + "422": + application/json: {} + agents_api_v1_conversations_messages: + speakeasy-default-agents-api-v1-conversations-messages: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.messages", "conversation_id": "", "messages": [{"object": "entry", "type": "message.input", "role": "assistant", "content": ""}, {"object": "entry", "type": "message.input", "role": "assistant", "content": [{"document_url": "https://black-and-white-sauerkraut.biz", "type": "document_url"}, {"type": "tool_file", "tool": "code_interpreter", "file_id": ""}, {"image_url": "https://emotional-couch.org", "type": 
"image_url"}]}, {"object": "entry", "type": "message.input", "role": "assistant", "content": ""}]} + "422": + application/json: {} + agents_api_v1_conversations_restart: + speakeasy-default-agents-api-v1-conversations-restart: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "from_entry_id": ""} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "tool.execution", "name": "image_generation"}, {"object": "entry", "type": "tool.execution", "name": "web_search_premium"}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_start_stream: + speakeasy-default-agents-api-v1-conversations-start-stream: + requestBody: + application/json: {"inputs": "", "stream": true} + responses: + "422": + application/json: {} + agents_api_v1_conversations_append_stream: + speakeasy-default-agents-api-v1-conversations-append-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server"} + responses: + "422": + application/json: {} + agents_api_v1_conversations_restart_stream: + speakeasy-default-agents-api-v1-conversations-restart-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "from_entry_id": ""} + responses: + "422": + application/json: {} + agents_api_v1_agents_create: + speakeasy-default-agents-api-v1-agents-create: + requestBody: + application/json: {"model": "Fiesta", "name": ""} + responses: + "200": + application/json: {"model": "LeBaron", "name": "", "object": "agent", "id": "", "version": 417458, "created_at": "2023-05-28T06:20:22.766Z", "updated_at": "2023-03-17T15:39:20.911Z"} + "422": + 
application/json: {} + agents_api_v1_agents_list: + speakeasy-default-agents-api-v1-agents-list: + parameters: + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"model": "Golf", "name": "", "object": "agent", "id": "", "version": 678317, "created_at": "2023-07-14T18:23:27.528Z", "updated_at": "2023-09-09T18:28:08.953Z"}, {"model": "Aventador", "name": "", "object": "agent", "id": "", "version": 635532, "created_at": "2024-12-01T18:25:37.169Z", "updated_at": "2023-01-20T06:21:22.156Z"}, {"model": "Model T", "name": "", "object": "agent", "id": "", "version": 86140, "created_at": "2023-03-17T01:57:00.187Z", "updated_at": "2025-01-24T00:05:25.844Z"}] + "422": + application/json: {} + agents_api_v1_agents_get: + speakeasy-default-agents-api-v1-agents-get: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: {"model": "Model S", "name": "", "object": "agent", "id": "", "version": 558834, "created_at": "2024-08-19T11:11:04.610Z", "updated_at": "2024-07-25T06:33:15.810Z"} + "422": + application/json: {} + agents_api_v1_agents_update: + speakeasy-default-agents-api-v1-agents-update: + parameters: + path: + agent_id: "" + requestBody: + application/json: {} + responses: + "200": + application/json: {"model": "Sentra", "name": "", "object": "agent", "id": "", "version": 597129, "created_at": "2024-01-13T16:52:57.274Z", "updated_at": "2025-12-22T15:27:45.882Z"} + "422": + application/json: {} + agents_api_v1_agents_update_version: + speakeasy-default-agents-api-v1-agents-update-version: + parameters: + path: + agent_id: "" + query: + version: 193920 + responses: + "200": + application/json: {"model": "Mercielago", "name": "", "object": "agent", "id": "", "version": 253661, "created_at": "2023-02-14T22:44:06.703Z", "updated_at": "2025-12-15T06:22:04.120Z"} + "422": + application/json: {} examplesVersion: 1.0.0 generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 35d79fdf..820ed567 100644 --- 
a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.7.1 + version: 1.8.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 1d3e05a8..d0361942 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,10 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 - sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 + sourceRevisionDigest: sha256:f2590d9933e1e9208fa5b8e509b671e6a86907268bcd5dad41dc4179e20c5b69 + sourceBlobDigest: sha256:3026ed65da39c94e9787697305e7e059bec5cff09bceeddc6e68c289cfaeb592 tags: - latest - - speakeasy-sdk-regen-1747926206 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +36,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 - sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 + sourceRevisionDigest: sha256:f2590d9933e1e9208fa5b8e509b671e6a86907268bcd5dad41dc4179e20c5b69 + sourceBlobDigest: sha256:3026ed65da39c94e9787697305e7e059bec5cff09bceeddc6e68c289cfaeb592 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:a130e7408a5dd6edaaba35712518861618aefc523eb03f15d7b53b9bfd085c5b + codeSamplesRevisionDigest: sha256:bd4031e558c0426c02f2a4f3bb1642068047aa555e0f9cbbc70de74ff7ec04ec workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/README.md b/README.md index d5b265b6..b8926d7b 100644 --- a/README.md +++ b/README.md @@ -108,6 +108,18 @@ Once that is saved to a file, you can run it with `uv run script.py` where `script.py` can be replaced with the actual file 
name. +### Agents extra dependencies + +When using the agents-related features it is required to add the `agents` extra dependencies. This can be added when +installing the package: + +```bash +pip install "mistralai[agents]" +``` + +> Note: Because of some of our dependencies, these features are only available for Python versions greater than or equal to +> 3.10. + ## SDK Example Usage @@ -432,6 +444,30 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [get](docs/sdks/mistraljobs/README.md#get) - Get Batch Job * [cancel](docs/sdks/mistraljobs/README.md#cancel) - Cancel Batch Job +### [beta](docs/sdks/beta/README.md) + + +#### [beta.agents](docs/sdks/mistralagents/README.md) + +* [create](docs/sdks/mistralagents/README.md#create) - Create a agent that can be used within a conversation. +* [list](docs/sdks/mistralagents/README.md#list) - List agent entities. +* [get](docs/sdks/mistralagents/README.md#get) - Retrieve an agent entity. +* [update](docs/sdks/mistralagents/README.md#update) - Update an agent entity. +* [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. + +#### [beta.conversations](docs/sdks/conversations/README.md) + +* [start](docs/sdks/conversations/README.md#start) - Create a conversation and append entries to it. +* [list](docs/sdks/conversations/README.md#list) - List all created conversations. +* [get](docs/sdks/conversations/README.md#get) - Retrieve a conversation information. +* [append](docs/sdks/conversations/README.md#append) - Append new entries to an existing conversation. +* [get_history](docs/sdks/conversations/README.md#get_history) - Retrieve all entries in a conversation. +* [get_messages](docs/sdks/conversations/README.md#get_messages) - Retrieve all messages in a conversation. +* [restart](docs/sdks/conversations/README.md#restart) - Restart a conversation starting from a given entry. 
+* [start_stream](docs/sdks/conversations/README.md#start_stream) - Create a conversation and append entries to it. +* [append_stream](docs/sdks/conversations/README.md#append_stream) - Append new entries to an existing conversation. +* [restart_stream](docs/sdks/conversations/README.md#restart_stream) - Restart a conversation starting from a given entry. + ### [chat](docs/sdks/chat/README.md) * [complete](docs/sdks/chat/README.md#complete) - Chat Completion @@ -511,12 +547,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.stream(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ]) + res = mistral.beta.conversations.start_stream(inputs="") with res as event_stream: for event in event_stream: diff --git a/RELEASES.md b/RELEASES.md index 744e3312..fc9229a9 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -208,4 +208,14 @@ Based on: ### Generated - [python v1.7.1] . ### Releases -- [PyPI v1.7.1] https://pypi.org/project/mistralai/1.7.1 - . \ No newline at end of file +- [PyPI v1.7.1] https://pypi.org/project/mistralai/1.7.1 - . + +## 2025-05-26 11:05:08 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.0] . +### Releases +- [PyPI v1.8.0] https://pypi.org/project/mistralai/1.8.0 - . 
\ No newline at end of file diff --git a/docs/models/agent.md b/docs/models/agent.md new file mode 100644 index 00000000..9a64fb68 --- /dev/null +++ b/docs/models/agent.md @@ -0,0 +1,19 @@ +# Agent + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentTools](../models/agenttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.AgentObject]](../models/agentobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md new file mode 100644 index 00000000..93dde728 --- /dev/null +++ b/docs/models/agentconversation.md @@ -0,0 +1,14 @@ +# AgentConversation + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. 
| +| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversationobject.md b/docs/models/agentconversationobject.md new file mode 100644 index 00000000..ea7cc75c --- /dev/null +++ b/docs/models/agentconversationobject.md @@ -0,0 +1,8 @@ +# AgentConversationObject + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `CONVERSATION` | conversation | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md new file mode 100644 index 00000000..324ff25c --- /dev/null +++ b/docs/models/agentcreationrequest.md @@ -0,0 +1,14 @@ +# AgentCreationRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentCreationRequestTools](../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequesttools.md b/docs/models/agentcreationrequesttools.md new file mode 100644 index 00000000..c2525850 --- /dev/null +++ b/docs/models/agentcreationrequesttools.md @@ -0,0 +1,41 @@ +# AgentCreationRequestTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/agenthandoffdoneevent.md b/docs/models/agenthandoffdoneevent.md new file mode 100644 index 00000000..a8a74ec0 --- /dev/null +++ b/docs/models/agenthandoffdoneevent.md @@ -0,0 +1,13 @@ +# AgentHandoffDoneEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | 
:heavy_check_mark: | N/A | +| `type` | [Optional[models.AgentHandoffDoneEventType]](../models/agenthandoffdoneeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffdoneeventtype.md b/docs/models/agenthandoffdoneeventtype.md new file mode 100644 index 00000000..c864ce43 --- /dev/null +++ b/docs/models/agenthandoffdoneeventtype.md @@ -0,0 +1,8 @@ +# AgentHandoffDoneEventType + + +## Values + +| Name | Value | +| -------------------- | -------------------- | +| `AGENT_HANDOFF_DONE` | agent.handoff.done | \ No newline at end of file diff --git a/docs/models/agenthandoffentry.md b/docs/models/agenthandoffentry.md new file mode 100644 index 00000000..327f8048 --- /dev/null +++ b/docs/models/agenthandoffentry.md @@ -0,0 +1,16 @@ +# AgentHandoffEntry + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.AgentHandoffEntryObject]](../models/agenthandoffentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.AgentHandoffEntryType]](../models/agenthandoffentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffentryobject.md b/docs/models/agenthandoffentryobject.md new file mode 100644 index 00000000..4bb876fb --- /dev/null +++ b/docs/models/agenthandoffentryobject.md @@ -0,0 +1,8 @@ +# AgentHandoffEntryObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/agenthandoffentrytype.md b/docs/models/agenthandoffentrytype.md new file mode 100644 index 00000000..527ebceb --- /dev/null +++ b/docs/models/agenthandoffentrytype.md @@ -0,0 +1,8 @@ +# AgentHandoffEntryType + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `AGENT_HANDOFF` | agent.handoff | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedevent.md b/docs/models/agenthandoffstartedevent.md new file mode 100644 index 00000000..f99ed45d --- /dev/null +++ b/docs/models/agenthandoffstartedevent.md @@ -0,0 +1,13 @@ +# AgentHandoffStartedEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.AgentHandoffStartedEventType]](../models/agenthandoffstartedeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| 
`output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedeventtype.md b/docs/models/agenthandoffstartedeventtype.md new file mode 100644 index 00000000..4ffaff15 --- /dev/null +++ b/docs/models/agenthandoffstartedeventtype.md @@ -0,0 +1,8 @@ +# AgentHandoffStartedEventType + + +## Values + +| Name | Value | +| ----------------------- | ----------------------- | +| `AGENT_HANDOFF_STARTED` | agent.handoff.started | \ No newline at end of file diff --git a/docs/models/agentobject.md b/docs/models/agentobject.md new file mode 100644 index 00000000..70e143b0 --- /dev/null +++ b/docs/models/agentobject.md @@ -0,0 +1,8 @@ +# AgentObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `AGENT` | agent | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md new file mode 100644 index 00000000..b46ac23d --- /dev/null +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistrequest.md b/docs/models/agentsapiv1agentslistrequest.md new file mode 100644 index 00000000..b5bcee62 --- /dev/null +++ b/docs/models/agentsapiv1agentslistrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsListRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsupdaterequest.md b/docs/models/agentsapiv1agentsupdaterequest.md new file mode 100644 index 
00000000..f60f8e5b --- /dev/null +++ b/docs/models/agentsapiv1agentsupdaterequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsUpdateRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_update_request` | [models.AgentUpdateRequest](../models/agentupdaterequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsupdateversionrequest.md b/docs/models/agentsapiv1agentsupdateversionrequest.md new file mode 100644 index 00000000..e937acc9 --- /dev/null +++ b/docs/models/agentsapiv1agentsupdateversionrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsUpdateVersionRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsappendrequest.md b/docs/models/agentsapiv1conversationsappendrequest.md new file mode 100644 index 00000000..ac8a00ec --- /dev/null +++ b/docs/models/agentsapiv1conversationsappendrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsAppendRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. 
| +| `conversation_append_request` | [models.ConversationAppendRequest](../models/conversationappendrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsappendstreamrequest.md b/docs/models/agentsapiv1conversationsappendstreamrequest.md new file mode 100644 index 00000000..dbc330f1 --- /dev/null +++ b/docs/models/agentsapiv1conversationsappendstreamrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsAppendStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. 
| +| `conversation_append_stream_request` | [models.ConversationAppendStreamRequest](../models/conversationappendstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetrequest.md b/docs/models/agentsapiv1conversationsgetrequest.md new file mode 100644 index 00000000..0d2d7827 --- /dev/null +++ b/docs/models/agentsapiv1conversationsgetrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md b/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md new file mode 100644 index 00000000..4bc836f3 --- /dev/null +++ b/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md @@ -0,0 +1,19 @@ +# AgentsAPIV1ConversationsGetResponseV1ConversationsGet + +Successful Response + + +## Supported Types + +### `models.ModelConversation` + +```python +value: models.ModelConversation = /* values here */ +``` + +### `models.AgentConversation` + +```python +value: models.AgentConversation = /* values here */ +``` + diff --git a/docs/models/agentsapiv1conversationshistoryrequest.md b/docs/models/agentsapiv1conversationshistoryrequest.md new file mode 100644 index 00000000..f0d4f049 --- /dev/null +++ b/docs/models/agentsapiv1conversationshistoryrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsHistoryRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationslistrequest.md b/docs/models/agentsapiv1conversationslistrequest.md new file mode 100644 
index 00000000..528a055a --- /dev/null +++ b/docs/models/agentsapiv1conversationslistrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsListRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsmessagesrequest.md b/docs/models/agentsapiv1conversationsmessagesrequest.md new file mode 100644 index 00000000..b3189925 --- /dev/null +++ b/docs/models/agentsapiv1conversationsmessagesrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsMessagesRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartrequest.md b/docs/models/agentsapiv1conversationsrestartrequest.md new file mode 100644 index 00000000..11a2fe2e --- /dev/null +++ b/docs/models/agentsapiv1conversationsrestartrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsRestartRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_restart_request` | [models.ConversationRestartRequest](../models/conversationrestartrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartstreamrequest.md 
b/docs/models/agentsapiv1conversationsrestartstreamrequest.md new file mode 100644 index 00000000..4cbb9d6c --- /dev/null +++ b/docs/models/agentsapiv1conversationsrestartstreamrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsRestartStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_restart_stream_request` | [models.ConversationRestartStreamRequest](../models/conversationrestartstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenttools.md b/docs/models/agenttools.md new file mode 100644 index 00000000..15891f56 --- /dev/null +++ b/docs/models/agenttools.md @@ -0,0 +1,41 @@ +# AgentTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/agentupdaterequest.md b/docs/models/agentupdaterequest.md new file mode 100644 index 00000000..9da03d03 --- /dev/null +++ b/docs/models/agentupdaterequest.md @@ -0,0 +1,14 @@ +# AgentUpdateRequest + + 
+## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentUpdateRequestTools](../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentupdaterequesttools.md b/docs/models/agentupdaterequesttools.md new file mode 100644 index 00000000..1752ee68 --- /dev/null +++ b/docs/models/agentupdaterequesttools.md @@ -0,0 +1,41 @@ +# AgentUpdateRequestTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: 
models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/builtinconnectors.md b/docs/models/builtinconnectors.md new file mode 100644 index 00000000..f96f5044 --- /dev/null +++ b/docs/models/builtinconnectors.md @@ -0,0 +1,12 @@ +# BuiltInConnectors + + +## Values + +| Name | Value | +| -------------------- | -------------------- | +| `WEB_SEARCH` | web_search | +| `WEB_SEARCH_PREMIUM` | web_search_premium | +| `CODE_INTERPRETER` | code_interpreter | +| `IMAGE_GENERATION` | image_generation | +| `DOCUMENT_LIBRARY` | document_library | \ No newline at end of file diff --git a/docs/models/codeinterpretertool.md b/docs/models/codeinterpretertool.md new file mode 100644 index 00000000..d5ad789e --- /dev/null +++ b/docs/models/codeinterpretertool.md @@ -0,0 +1,8 @@ +# CodeInterpreterTool + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | [Optional[models.CodeInterpreterToolType]](../models/codeinterpretertooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/codeinterpretertooltype.md b/docs/models/codeinterpretertooltype.md new file mode 100644 index 00000000..f704b65e --- /dev/null +++ b/docs/models/codeinterpretertooltype.md @@ -0,0 +1,8 @@ +# CodeInterpreterToolType + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `CODE_INTERPRETER` | code_interpreter | \ No newline at end of file diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md new file mode 100644 index 00000000..5f07b673 --- /dev/null +++ b/docs/models/completionargs.md @@ -0,0 +1,19 @@ +# CompletionArgs + +White-listed arguments from the 
completion API + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | +| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | +| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | +| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionargsstop.md b/docs/models/completionargsstop.md new file mode 100644 index 00000000..b93f993e --- /dev/null +++ b/docs/models/completionargsstop.md @@ -0,0 +1,17 @@ +# CompletionArgsStop + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/completionjobout.md b/docs/models/completionjobout.md index 381aeb94..7f30f58c 100644 --- a/docs/models/completionjobout.md +++ b/docs/models/completionjobout.md @@ -14,7 +14,7 @@ | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the 
IDs of uploaded files that contain training data. | | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `object` | [Optional[models.CompletionJobOutObject]](../models/completionjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | | `integrations` | List[[models.Integrations](../models/integrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | diff --git a/docs/models/completionjoboutobject.md b/docs/models/completionjoboutobject.md new file mode 100644 index 00000000..712b107d --- /dev/null +++ b/docs/models/completionjoboutobject.md @@ -0,0 +1,10 @@ +# CompletionJobOutObject + +The object type of the fine-tuning job. 
+ + +## Values + +| Name | Value | +| ----- | ----- | +| `JOB` | job | \ No newline at end of file diff --git a/docs/models/conversationappendrequest.md b/docs/models/conversationappendrequest.md new file mode 100644 index 00000000..1cdb584b --- /dev/null +++ b/docs/models/conversationappendrequest.md @@ -0,0 +1,12 @@ +# ConversationAppendRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/conversationappendrequesthandoffexecution.md b/docs/models/conversationappendrequesthandoffexecution.md new file mode 100644 index 00000000..7418b36a --- /dev/null +++ b/docs/models/conversationappendrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationAppendRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationappendstreamrequest.md b/docs/models/conversationappendstreamrequest.md new file mode 100644 index 00000000..a8516ea7 --- /dev/null +++ b/docs/models/conversationappendstreamrequest.md @@ -0,0 +1,12 @@ +# ConversationAppendStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/conversationappendstreamrequesthandoffexecution.md b/docs/models/conversationappendstreamrequesthandoffexecution.md new file mode 100644 index 00000000..1bbced3e --- /dev/null +++ b/docs/models/conversationappendstreamrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationAppendStreamRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationevents.md b/docs/models/conversationevents.md new file mode 100644 index 00000000..f1e2c4e9 --- /dev/null +++ b/docs/models/conversationevents.md @@ -0,0 +1,9 @@ +# ConversationEvents + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `event` | [models.SSETypes](../models/ssetypes.md) | :heavy_check_mark: | Server side events sent when streaming a conversation response. 
| +| `data` | [models.ConversationEventsData](../models/conversationeventsdata.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationeventsdata.md b/docs/models/conversationeventsdata.md new file mode 100644 index 00000000..81faf197 --- /dev/null +++ b/docs/models/conversationeventsdata.md @@ -0,0 +1,59 @@ +# ConversationEventsData + + +## Supported Types + +### `models.AgentHandoffDoneEvent` + +```python +value: models.AgentHandoffDoneEvent = /* values here */ +``` + +### `models.AgentHandoffStartedEvent` + +```python +value: models.AgentHandoffStartedEvent = /* values here */ +``` + +### `models.ResponseDoneEvent` + +```python +value: models.ResponseDoneEvent = /* values here */ +``` + +### `models.ResponseErrorEvent` + +```python +value: models.ResponseErrorEvent = /* values here */ +``` + +### `models.ResponseStartedEvent` + +```python +value: models.ResponseStartedEvent = /* values here */ +``` + +### `models.FunctionCallEvent` + +```python +value: models.FunctionCallEvent = /* values here */ +``` + +### `models.MessageOutputEvent` + +```python +value: models.MessageOutputEvent = /* values here */ +``` + +### `models.ToolExecutionDoneEvent` + +```python +value: models.ToolExecutionDoneEvent = /* values here */ +``` + +### `models.ToolExecutionStartedEvent` + +```python +value: models.ToolExecutionStartedEvent = /* values here */ +``` + diff --git a/docs/models/conversationhistory.md b/docs/models/conversationhistory.md new file mode 100644 index 00000000..8bcef1de --- /dev/null +++ b/docs/models/conversationhistory.md @@ -0,0 +1,12 @@ +# ConversationHistory + +Retrieve all entries in a conversation. 
+ + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `entries` | List[[models.Entries](../models/entries.md)] | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ConversationHistoryObject]](../models/conversationhistoryobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationhistoryobject.md b/docs/models/conversationhistoryobject.md new file mode 100644 index 00000000..a14e7f9c --- /dev/null +++ b/docs/models/conversationhistoryobject.md @@ -0,0 +1,8 @@ +# ConversationHistoryObject + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `CONVERSATION_HISTORY` | conversation.history | \ No newline at end of file diff --git a/docs/models/conversationinputs.md b/docs/models/conversationinputs.md new file mode 100644 index 00000000..86db40ea --- /dev/null +++ b/docs/models/conversationinputs.md @@ -0,0 +1,17 @@ +# ConversationInputs + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.InputEntries]` + +```python +value: List[models.InputEntries] = /* values here */ +``` + diff --git a/docs/models/conversationmessages.md b/docs/models/conversationmessages.md new file mode 100644 index 00000000..f6a5569f --- /dev/null +++ b/docs/models/conversationmessages.md @@ -0,0 +1,12 @@ +# ConversationMessages + +Similar to the conversation history but only keep the messages + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ConversationMessagesObject]](../models/conversationmessagesobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationmessagesobject.md b/docs/models/conversationmessagesobject.md new file mode 100644 index 00000000..db3a441b --- /dev/null +++ b/docs/models/conversationmessagesobject.md @@ -0,0 +1,8 @@ +# ConversationMessagesObject + + +## Values + +| Name | Value | +| ----------------------- | ----------------------- | +| `CONVERSATION_MESSAGES` | conversation.messages | \ No newline at end of file diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md new file mode 100644 index 00000000..141533e7 --- /dev/null +++ b/docs/models/conversationrequest.md @@ -0,0 +1,18 @@ +# ConversationRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../models/handoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | 
*OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.Tools](../models/tools.md)] | :heavy_minus_sign: | N/A | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationresponse.md b/docs/models/conversationresponse.md new file mode 100644 index 00000000..3309a08b --- /dev/null +++ b/docs/models/conversationresponse.md @@ -0,0 +1,13 @@ +# ConversationResponse + +The response after appending new entries to the conversation. + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `outputs` | List[[models.Outputs](../models/outputs.md)] | :heavy_check_mark: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ConversationResponseObject]](../models/conversationresponseobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationresponseobject.md b/docs/models/conversationresponseobject.md new file mode 100644 index 00000000..bea66e52 --- /dev/null +++ b/docs/models/conversationresponseobject.md @@ -0,0 +1,8 @@ +# ConversationResponseObject + + +## Values + +| Name | Value | +| 
----------------------- | ----------------------- | +| `CONVERSATION_RESPONSE` | conversation.response | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md new file mode 100644 index 00000000..15a6ead4 --- /dev/null +++ b/docs/models/conversationrestartrequest.md @@ -0,0 +1,15 @@ +# ConversationRestartRequest + +Request to restart a new conversation from a given entry in the conversation. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/conversationrestartrequesthandoffexecution.md b/docs/models/conversationrestartrequesthandoffexecution.md new file mode 100644 index 00000000..5790624b --- /dev/null +++ b/docs/models/conversationrestartrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationRestartRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md new file mode 100644 index 00000000..30f3767c --- /dev/null +++ b/docs/models/conversationrestartstreamrequest.md @@ -0,0 +1,15 @@ +# ConversationRestartStreamRequest + +Request to restart a new conversation from a given entry in the conversation. 
+ + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequesthandoffexecution.md b/docs/models/conversationrestartstreamrequesthandoffexecution.md new file mode 100644 index 00000000..97266b43 --- /dev/null +++ b/docs/models/conversationrestartstreamrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationRestartStreamRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md new file mode 100644 index 00000000..a571e2af --- /dev/null +++ b/docs/models/conversationstreamrequest.md @@ -0,0 +1,18 @@ +# 
ConversationStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationStreamRequestTools](../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationstreamrequesthandoffexecution.md b/docs/models/conversationstreamrequesthandoffexecution.md new file mode 100644 index 00000000..c98e194c --- /dev/null +++ b/docs/models/conversationstreamrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationStreamRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | 
-------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationstreamrequesttools.md b/docs/models/conversationstreamrequesttools.md new file mode 100644 index 00000000..700c8448 --- /dev/null +++ b/docs/models/conversationstreamrequesttools.md @@ -0,0 +1,41 @@ +# ConversationStreamRequestTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/conversationusageinfo.md b/docs/models/conversationusageinfo.md new file mode 100644 index 00000000..57e26033 --- /dev/null +++ b/docs/models/conversationusageinfo.md @@ -0,0 +1,12 @@ +# ConversationUsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `connector_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `connectors` | Dict[str, *int*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documentlibrarytool.md b/docs/models/documentlibrarytool.md new file mode 100644 index 00000000..bed4e2c5 --- /dev/null +++ b/docs/models/documentlibrarytool.md @@ -0,0 +1,9 @@ +# 
DocumentLibraryTool + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. | +| `type` | [Optional[models.DocumentLibraryToolType]](../models/documentlibrarytooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documentlibrarytooltype.md b/docs/models/documentlibrarytooltype.md new file mode 100644 index 00000000..ebd420f6 --- /dev/null +++ b/docs/models/documentlibrarytooltype.md @@ -0,0 +1,8 @@ +# DocumentLibraryToolType + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `DOCUMENT_LIBRARY` | document_library | \ No newline at end of file diff --git a/docs/models/entries.md b/docs/models/entries.md new file mode 100644 index 00000000..8e5a20d0 --- /dev/null +++ b/docs/models/entries.md @@ -0,0 +1,41 @@ +# Entries + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + +### `models.FunctionResultEntry` + +```python +value: models.FunctionResultEntry = /* values here */ +``` + +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/functioncallentry.md b/docs/models/functioncallentry.md new file mode 100644 
index 00000000..55665bad --- /dev/null +++ b/docs/models/functioncallentry.md @@ -0,0 +1,15 @@ +# FunctionCallEntry + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.FunctionCallEntryObject]](../models/functioncallentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.FunctionCallEntryType]](../models/functioncallentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncallentryarguments.md b/docs/models/functioncallentryarguments.md new file mode 100644 index 00000000..f1f6e39e --- /dev/null +++ b/docs/models/functioncallentryarguments.md @@ -0,0 +1,17 @@ +# FunctionCallEntryArguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/functioncallentryobject.md b/docs/models/functioncallentryobject.md new file mode 100644 index 00000000..3cf2e427 --- /dev/null +++ b/docs/models/functioncallentryobject.md @@ -0,0 +1,8 @@ +# FunctionCallEntryObject + + +## Values + +| Name | Value | 
+| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/functioncallentrytype.md b/docs/models/functioncallentrytype.md new file mode 100644 index 00000000..7ea34c52 --- /dev/null +++ b/docs/models/functioncallentrytype.md @@ -0,0 +1,8 @@ +# FunctionCallEntryType + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `FUNCTION_CALL` | function.call | \ No newline at end of file diff --git a/docs/models/functioncallevent.md b/docs/models/functioncallevent.md new file mode 100644 index 00000000..a5162090 --- /dev/null +++ b/docs/models/functioncallevent.md @@ -0,0 +1,14 @@ +# FunctionCallEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.FunctionCallEventType]](../models/functioncalleventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncalleventtype.md b/docs/models/functioncalleventtype.md new file mode 100644 index 00000000..8cf3f038 --- /dev/null +++ b/docs/models/functioncalleventtype.md @@ -0,0 +1,8 @@ +# FunctionCallEventType + + +## Values + +| Name | Value | +| --------------------- | --------------------- | +| `FUNCTION_CALL_DELTA` | function.call.delta | \ No newline at end of file diff --git 
a/docs/models/functionresultentry.md b/docs/models/functionresultentry.md new file mode 100644 index 00000000..5cdcf3eb --- /dev/null +++ b/docs/models/functionresultentry.md @@ -0,0 +1,14 @@ +# FunctionResultEntry + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `result` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.FunctionResultEntryObject]](../models/functionresultentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.FunctionResultEntryType]](../models/functionresultentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functionresultentryobject.md b/docs/models/functionresultentryobject.md new file mode 100644 index 00000000..fe52e0a5 --- /dev/null +++ b/docs/models/functionresultentryobject.md @@ -0,0 +1,8 @@ +# FunctionResultEntryObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/functionresultentrytype.md b/docs/models/functionresultentrytype.md new file mode 100644 index 00000000..35c94d8e --- /dev/null +++ b/docs/models/functionresultentrytype.md @@ -0,0 +1,8 @@ +# FunctionResultEntryType + + +## Values + +| Name | Value | +| ----------------- | ----------------- | +| `FUNCTION_RESULT` | 
function.result | \ No newline at end of file diff --git a/docs/models/functiontool.md b/docs/models/functiontool.md new file mode 100644 index 00000000..1332febe --- /dev/null +++ b/docs/models/functiontool.md @@ -0,0 +1,9 @@ +# FunctionTool + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.FunctionToolType]](../models/functiontooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functiontooltype.md b/docs/models/functiontooltype.md new file mode 100644 index 00000000..9c095625 --- /dev/null +++ b/docs/models/functiontooltype.md @@ -0,0 +1,8 @@ +# FunctionToolType + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/models/handoffexecution.md b/docs/models/handoffexecution.md new file mode 100644 index 00000000..61e7dade --- /dev/null +++ b/docs/models/handoffexecution.md @@ -0,0 +1,9 @@ +# HandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/imagegenerationtool.md b/docs/models/imagegenerationtool.md new file mode 100644 index 00000000..b8fc9cf4 --- /dev/null +++ b/docs/models/imagegenerationtool.md @@ -0,0 +1,8 @@ +# ImageGenerationTool + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | [Optional[models.ImageGenerationToolType]](../models/imagegenerationtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/imagegenerationtooltype.md b/docs/models/imagegenerationtooltype.md new file mode 100644 index 00000000..29681b58 --- /dev/null +++ b/docs/models/imagegenerationtooltype.md @@ -0,0 +1,8 @@ +# ImageGenerationToolType + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `IMAGE_GENERATION` | image_generation | \ No newline at end of file diff --git a/docs/models/inputentries.md b/docs/models/inputentries.md new file mode 100644 index 00000000..e1e48279 --- /dev/null +++ b/docs/models/inputentries.md @@ -0,0 +1,17 @@ +# InputEntries + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.FunctionResultEntry` + +```python +value: models.FunctionResultEntry = /* values here */ +``` + diff --git a/docs/models/messageentries.md b/docs/models/messageentries.md new file mode 100644 index 00000000..76256fb9 --- /dev/null +++ b/docs/models/messageentries.md @@ -0,0 +1,17 @@ +# MessageEntries + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + diff --git a/docs/models/messageinputcontentchunks.md b/docs/models/messageinputcontentchunks.md new file mode 100644 index 00000000..50795f0e --- /dev/null +++ b/docs/models/messageinputcontentchunks.md @@ -0,0 +1,29 @@ +# MessageInputContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python 
+value: models.ImageURLChunk = /* values here */ +``` + +### `models.ToolFileChunk` + +```python +value: models.ToolFileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md new file mode 100644 index 00000000..a1573ed5 --- /dev/null +++ b/docs/models/messageinputentry.md @@ -0,0 +1,16 @@ +# MessageInputEntry + +Representation of an input message inside the conversation. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentrycontent.md b/docs/models/messageinputentrycontent.md new file mode 100644 index 00000000..65e55d97 --- /dev/null +++ b/docs/models/messageinputentrycontent.md @@ -0,0 +1,17 @@ +# MessageInputEntryContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` 
+ +### `List[models.MessageInputContentChunks]` + +```python +value: List[models.MessageInputContentChunks] = /* values here */ +``` + diff --git a/docs/models/messageinputentryrole.md b/docs/models/messageinputentryrole.md new file mode 100644 index 00000000..f2fdc71d --- /dev/null +++ b/docs/models/messageinputentryrole.md @@ -0,0 +1,9 @@ +# MessageInputEntryRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | +| `USER` | user | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md new file mode 100644 index 00000000..d3378124 --- /dev/null +++ b/docs/models/messageinputentrytype.md @@ -0,0 +1,8 @@ +# MessageInputEntryType + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md new file mode 100644 index 00000000..5dc74a89 --- /dev/null +++ b/docs/models/messageoutputcontentchunks.md @@ -0,0 +1,35 @@ +# MessageOutputContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.ToolFileChunk` + +```python +value: models.ToolFileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/messageoutputentry.md b/docs/models/messageoutputentry.md new file mode 100644 index 00000000..224d043d --- /dev/null +++ b/docs/models/messageoutputentry.md @@ -0,0 +1,16 @@ +# MessageOutputEntry + + +## Fields + +| Field | Type | Required | Description | +| 
---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.MessageOutputEntryObject]](../models/messageoutputentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.MessageOutputEntryType]](../models/messageoutputentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.MessageOutputEntryRole]](../models/messageoutputentryrole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputentrycontent.md b/docs/models/messageoutputentrycontent.md new file mode 100644 index 00000000..5206e4eb --- /dev/null +++ b/docs/models/messageoutputentrycontent.md @@ -0,0 +1,17 @@ +# MessageOutputEntryContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.MessageOutputContentChunks]` + +```python +value: List[models.MessageOutputContentChunks] = /* values here */ +``` + diff --git a/docs/models/messageoutputentryobject.md b/docs/models/messageoutputentryobject.md new file mode 100644 index 00000000..bb254c82 --- /dev/null +++ b/docs/models/messageoutputentryobject.md @@ -0,0 +1,8 @@ +# MessageOutputEntryObject + + 
+## Values + +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/messageoutputentryrole.md b/docs/models/messageoutputentryrole.md new file mode 100644 index 00000000..783ee0aa --- /dev/null +++ b/docs/models/messageoutputentryrole.md @@ -0,0 +1,8 @@ +# MessageOutputEntryRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/messageoutputentrytype.md b/docs/models/messageoutputentrytype.md new file mode 100644 index 00000000..cb4a7a1b --- /dev/null +++ b/docs/models/messageoutputentrytype.md @@ -0,0 +1,8 @@ +# MessageOutputEntryType + + +## Values + +| Name | Value | +| ---------------- | ---------------- | +| `MESSAGE_OUTPUT` | message.output | \ No newline at end of file diff --git a/docs/models/messageoutputevent.md b/docs/models/messageoutputevent.md new file mode 100644 index 00000000..3fe8ac49 --- /dev/null +++ b/docs/models/messageoutputevent.md @@ -0,0 +1,16 @@ +# MessageOutputEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.MessageOutputEventType]](../models/messageoutputeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | 
*OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.MessageOutputEventRole]](../models/messageoutputeventrole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputeventcontent.md b/docs/models/messageoutputeventcontent.md new file mode 100644 index 00000000..16d8d52f --- /dev/null +++ b/docs/models/messageoutputeventcontent.md @@ -0,0 +1,17 @@ +# MessageOutputEventContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `models.OutputContentChunks` + +```python +value: models.OutputContentChunks = /* values here */ +``` + diff --git a/docs/models/messageoutputeventrole.md b/docs/models/messageoutputeventrole.md new file mode 100644 index 00000000..e38c6472 --- /dev/null +++ b/docs/models/messageoutputeventrole.md @@ -0,0 +1,8 @@ +# MessageOutputEventRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/messageoutputeventtype.md b/docs/models/messageoutputeventtype.md new file mode 100644 index 00000000..1f43fdcc --- /dev/null +++ b/docs/models/messageoutputeventtype.md @@ -0,0 +1,8 @@ +# MessageOutputEventType + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `MESSAGE_OUTPUT_DELTA` | message.output.delta | \ No newline at end of file diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md new file mode 100644 index 00000000..ffedcc0f --- /dev/null +++ b/docs/models/modelconversation.md @@ -0,0 +1,17 @@ +# ModelConversation + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.ModelConversationTools](../models/modelconversationtools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. 
| +| `object` | [Optional[models.ModelConversationObject]](../models/modelconversationobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversationobject.md b/docs/models/modelconversationobject.md new file mode 100644 index 00000000..ead1fa26 --- /dev/null +++ b/docs/models/modelconversationobject.md @@ -0,0 +1,8 @@ +# ModelConversationObject + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `CONVERSATION` | conversation | \ No newline at end of file diff --git a/docs/models/modelconversationtools.md b/docs/models/modelconversationtools.md new file mode 100644 index 00000000..5cc97437 --- /dev/null +++ b/docs/models/modelconversationtools.md @@ -0,0 +1,41 @@ +# ModelConversationTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/object.md b/docs/models/object.md index ab4c4588..0122c0db 100644 --- a/docs/models/object.md +++ b/docs/models/object.md @@ -1,10 +1,8 @@ # Object -The object type of the fine-tuning job. 
- ## Values -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md new file mode 100644 index 00000000..2da475f7 --- /dev/null +++ b/docs/models/outputcontentchunks.md @@ -0,0 +1,35 @@ +# OutputContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.ToolFileChunk` + +```python +value: models.ToolFileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/outputs.md b/docs/models/outputs.md new file mode 100644 index 00000000..7756c627 --- /dev/null +++ b/docs/models/outputs.md @@ -0,0 +1,29 @@ +# Outputs + + +## Supported Types + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/responsebody.md b/docs/models/responsebody.md new file mode 100644 index 00000000..8a218517 --- /dev/null +++ b/docs/models/responsebody.md @@ -0,0 +1,17 @@ +# ResponseBody + + +## Supported Types + +### `models.ModelConversation` + +```python +value: models.ModelConversation = /* values here */ +``` + +### `models.AgentConversation` + +```python +value: models.AgentConversation = /* values here */ +``` + diff 
--git a/docs/models/responsedoneevent.md b/docs/models/responsedoneevent.md new file mode 100644 index 00000000..b33fa52c --- /dev/null +++ b/docs/models/responsedoneevent.md @@ -0,0 +1,10 @@ +# ResponseDoneEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ResponseDoneEventType]](../models/responsedoneeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responsedoneeventtype.md b/docs/models/responsedoneeventtype.md new file mode 100644 index 00000000..58f7f44d --- /dev/null +++ b/docs/models/responsedoneeventtype.md @@ -0,0 +1,8 @@ +# ResponseDoneEventType + + +## Values + +| Name | Value | +| ---------------------------- | ---------------------------- | +| `CONVERSATION_RESPONSE_DONE` | conversation.response.done | \ No newline at end of file diff --git a/docs/models/responseerrorevent.md b/docs/models/responseerrorevent.md new file mode 100644 index 00000000..e730b7c4 --- /dev/null +++ b/docs/models/responseerrorevent.md @@ -0,0 +1,11 @@ +# ResponseErrorEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| 
`message` | *str* | :heavy_check_mark: | N/A | +| `code` | *int* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ResponseErrorEventType]](../models/responseerroreventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseerroreventtype.md b/docs/models/responseerroreventtype.md new file mode 100644 index 00000000..3b3fc303 --- /dev/null +++ b/docs/models/responseerroreventtype.md @@ -0,0 +1,8 @@ +# ResponseErrorEventType + + +## Values + +| Name | Value | +| ----------------------------- | ----------------------------- | +| `CONVERSATION_RESPONSE_ERROR` | conversation.response.error | \ No newline at end of file diff --git a/docs/models/responsestartedevent.md b/docs/models/responsestartedevent.md new file mode 100644 index 00000000..7bd02b3e --- /dev/null +++ b/docs/models/responsestartedevent.md @@ -0,0 +1,10 @@ +# ResponseStartedEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ResponseStartedEventType]](../models/responsestartedeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responsestartedeventtype.md b/docs/models/responsestartedeventtype.md new file mode 100644 index 00000000..2d9273bd --- /dev/null +++ b/docs/models/responsestartedeventtype.md @@ -0,0 +1,8 @@ +# ResponseStartedEventType + + 
+## Values + +| Name | Value | +| ------------------------------- | ------------------------------- | +| `CONVERSATION_RESPONSE_STARTED` | conversation.response.started | \ No newline at end of file diff --git a/docs/models/ssetypes.md b/docs/models/ssetypes.md new file mode 100644 index 00000000..08d0f662 --- /dev/null +++ b/docs/models/ssetypes.md @@ -0,0 +1,18 @@ +# SSETypes + +Server side events sent when streaming a conversation response. + + +## Values + +| Name | Value | +| ------------------------------- | ------------------------------- | +| `CONVERSATION_RESPONSE_STARTED` | conversation.response.started | +| `CONVERSATION_RESPONSE_DONE` | conversation.response.done | +| `CONVERSATION_RESPONSE_ERROR` | conversation.response.error | +| `MESSAGE_OUTPUT_DELTA` | message.output.delta | +| `TOOL_EXECUTION_STARTED` | tool.execution.started | +| `TOOL_EXECUTION_DONE` | tool.execution.done | +| `AGENT_HANDOFF_STARTED` | agent.handoff.started | +| `AGENT_HANDOFF_DONE` | agent.handoff.done | +| `FUNCTION_CALL_DELTA` | function.call.delta | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneevent.md b/docs/models/toolexecutiondoneevent.md new file mode 100644 index 00000000..d6d28ce2 --- /dev/null +++ b/docs/models/toolexecutiondoneevent.md @@ -0,0 +1,13 @@ +# ToolExecutionDoneEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolExecutionDoneEventType]](../models/toolexecutiondoneeventtype.md) | 
:heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneeventtype.md b/docs/models/toolexecutiondoneeventtype.md new file mode 100644 index 00000000..872624c1 --- /dev/null +++ b/docs/models/toolexecutiondoneeventtype.md @@ -0,0 +1,8 @@ +# ToolExecutionDoneEventType + + +## Values + +| Name | Value | +| --------------------- | --------------------- | +| `TOOL_EXECUTION_DONE` | tool.execution.done | \ No newline at end of file diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md new file mode 100644 index 00000000..8422a8fd --- /dev/null +++ b/docs/models/toolexecutionentry.md @@ -0,0 +1,14 @@ +# ToolExecutionEntry + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ToolExecutionEntryObject]](../models/toolexecutionentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolExecutionEntryType]](../models/toolexecutionentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `info` | Dict[str, *Any*] 
| :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionentryobject.md b/docs/models/toolexecutionentryobject.md new file mode 100644 index 00000000..0ca79af5 --- /dev/null +++ b/docs/models/toolexecutionentryobject.md @@ -0,0 +1,8 @@ +# ToolExecutionEntryObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/toolexecutionentrytype.md b/docs/models/toolexecutionentrytype.md new file mode 100644 index 00000000..a67629b8 --- /dev/null +++ b/docs/models/toolexecutionentrytype.md @@ -0,0 +1,8 @@ +# ToolExecutionEntryType + + +## Values + +| Name | Value | +| ---------------- | ---------------- | +| `TOOL_EXECUTION` | tool.execution | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md new file mode 100644 index 00000000..4b03f94c --- /dev/null +++ b/docs/models/toolexecutionstartedevent.md @@ -0,0 +1,12 @@ +# ToolExecutionStartedEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolExecutionStartedEventType]](../models/toolexecutionstartedeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/toolexecutionstartedeventtype.md b/docs/models/toolexecutionstartedeventtype.md new file mode 100644 index 00000000..56695d1f --- /dev/null +++ b/docs/models/toolexecutionstartedeventtype.md @@ -0,0 +1,8 @@ +# ToolExecutionStartedEventType + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `TOOL_EXECUTION_STARTED` | tool.execution.started | \ No newline at end of file diff --git a/docs/models/toolfilechunk.md b/docs/models/toolfilechunk.md new file mode 100644 index 00000000..236d2f41 --- /dev/null +++ b/docs/models/toolfilechunk.md @@ -0,0 +1,12 @@ +# ToolFileChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolFileChunkType]](../models/toolfilechunktype.md) | :heavy_minus_sign: | N/A | +| `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunktype.md b/docs/models/toolfilechunktype.md new file mode 100644 index 00000000..7e99acef --- /dev/null +++ b/docs/models/toolfilechunktype.md @@ -0,0 +1,8 @@ +# ToolFileChunkType + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `TOOL_FILE` | tool_file | \ No newline at end of file diff --git a/docs/models/toolreferencechunk.md b/docs/models/toolreferencechunk.md new file mode 100644 index 00000000..fb4b46a6 --- /dev/null +++ b/docs/models/toolreferencechunk.md @@ -0,0 +1,12 @@ +# ToolReferenceChunk + + +## Fields + +| Field | 
Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `title` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolReferenceChunkType]](../models/toolreferencechunktype.md) | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `source` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolreferencechunktype.md b/docs/models/toolreferencechunktype.md new file mode 100644 index 00000000..bc57d277 --- /dev/null +++ b/docs/models/toolreferencechunktype.md @@ -0,0 +1,8 @@ +# ToolReferenceChunkType + + +## Values + +| Name | Value | +| ---------------- | ---------------- | +| `TOOL_REFERENCE` | tool_reference | \ No newline at end of file diff --git a/docs/models/tools.md b/docs/models/tools.md new file mode 100644 index 00000000..f308d732 --- /dev/null +++ b/docs/models/tools.md @@ -0,0 +1,41 @@ +# Tools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff 
--git a/docs/models/websearchpremiumtool.md b/docs/models/websearchpremiumtool.md new file mode 100644 index 00000000..941fc2b8 --- /dev/null +++ b/docs/models/websearchpremiumtool.md @@ -0,0 +1,8 @@ +# WebSearchPremiumTool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `type` | [Optional[models.WebSearchPremiumToolType]](../models/websearchpremiumtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/websearchpremiumtooltype.md b/docs/models/websearchpremiumtooltype.md new file mode 100644 index 00000000..348bfe85 --- /dev/null +++ b/docs/models/websearchpremiumtooltype.md @@ -0,0 +1,8 @@ +# WebSearchPremiumToolType + + +## Values + +| Name | Value | +| -------------------- | -------------------- | +| `WEB_SEARCH_PREMIUM` | web_search_premium | \ No newline at end of file diff --git a/docs/models/websearchtool.md b/docs/models/websearchtool.md new file mode 100644 index 00000000..c8d708bd --- /dev/null +++ b/docs/models/websearchtool.md @@ -0,0 +1,8 @@ +# WebSearchTool + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | [Optional[models.WebSearchToolType]](../models/websearchtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/websearchtooltype.md b/docs/models/websearchtooltype.md new file mode 100644 index 00000000..57b6acbb --- 
/dev/null +++ b/docs/models/websearchtooltype.md @@ -0,0 +1,8 @@ +# WebSearchToolType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `WEB_SEARCH` | web_search | \ No newline at end of file diff --git a/docs/sdks/beta/README.md b/docs/sdks/beta/README.md new file mode 100644 index 00000000..f5b5f822 --- /dev/null +++ b/docs/sdks/beta/README.md @@ -0,0 +1,6 @@ +# Beta +(*beta*) + +## Overview + +### Available Operations diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md new file mode 100644 index 00000000..b5c12b24 --- /dev/null +++ b/docs/sdks/conversations/README.md @@ -0,0 +1,466 @@ +# Conversations +(*beta.conversations*) + +## Overview + +### Available Operations + +* [start](#start) - Create a conversation and append entries to it. +* [list](#list) - List all created conversations. +* [get](#get) - Retrieve a conversation information. +* [append](#append) - Append new entries to an existing conversation. +* [get_history](#get_history) - Retrieve all entries in a conversation. +* [get_messages](#get_messages) - Retrieve all messages in a conversation. +* [restart](#restart) - Restart a conversation starting from a given entry. +* [start_stream](#start_stream) - Create a conversation and append entries to it. +* [append_stream](#append_stream) - Append new entries to an existing conversation. +* [restart_stream](#restart_stream) - Restart a conversation starting from a given entry. + +## start + +Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.start(inputs="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../../models/handoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.Tools](../../models/tools.md)] | :heavy_minus_sign: | N/A | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ConversationResponse](../../models/conversationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## list + +Retrieve a list of conversation entities sorted by creation time. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.list() + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.ResponseBody]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given a conversation_id retrieve a conversation entity with its attributes. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.get(conversation_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet](../../models/agentsapiv1conversationsgetresponsev1conversationsget.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## append + +Run completion on the history of the conversation and the user entries. Return the new created entries. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.append(conversation_id="", inputs="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ConversationResponse](../../models/conversationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get_history + +Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.get_history(conversation_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ConversationHistory](../../models/conversationhistory.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get_messages + +Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.get_messages(conversation_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ConversationMessages](../../models/conversationmessages.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## restart + +Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ConversationResponse](../../models/conversationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## start_stream + +Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.start_stream(inputs="") + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| 
`tools` | List[[models.ConversationStreamRequestTools](../../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[Union[eventstreaming.EventStream[models.ConversationEvents], eventstreaming.EventStreamAsync[models.ConversationEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## append_stream + +Run completion on the history of the conversation and the user entries. Return the new created entries. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.append_stream(conversation_id="", inputs="") + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[Union[eventstreaming.EventStream[models.ConversationEvents], eventstreaming.EventStreamAsync[models.ConversationEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## restart_stream + +Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.restart_stream(conversation_id="", inputs="", from_entry_id="") + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[Union[eventstreaming.EventStream[models.ConversationEvents], eventstreaming.EventStreamAsync[models.ConversationEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md new file mode 100644 index 00000000..aeb2b917 --- /dev/null +++ b/docs/sdks/mistralagents/README.md @@ -0,0 +1,227 @@ +# MistralAgents +(*beta.agents*) + +## Overview + +### Available Operations + +* [create](#create) - Create a agent that can be used within a conversation. +* [list](#list) - List agent entities. +* [get](#get) - Retrieve an agent entity. +* [update](#update) - Update an agent entity. +* [update_version](#update_version) - Update an agent version. + +## create + +Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.create(model="Fiesta", name="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentCreationRequestTools](../../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## list + +Retrieve a list of agent entities sorted by creation time. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list() + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.Agent]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given an agent retrieve an agent entity with its attributes. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.get(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## update + +Update an agent attributes and create a new version. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.update(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentUpdateRequestTools](../../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## update_version + +Switch the version of an agent. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.update_version(agent_id="", version=193920) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/async_chat_with_image_no_streaming.py b/examples/async_chat_with_image_no_streaming.py index ecb42257..efadff89 100755 --- a/examples/async_chat_with_image_no_streaming.py +++ b/examples/async_chat_with_image_no_streaming.py @@ -10,7 +10,7 @@ async def main(): api_key = os.environ["MISTRAL_API_KEY"] - model = "pixtral-12b" + model = "pixtral-12b-2409" client = Mistral(api_key=api_key) chat_response = await client.chat.complete_async( @@ -21,7 +21,7 @@ async def main(): {"type": "text", "text": "What's in this image?"}, { "type": "image_url", - "image_url": "https://cms.mistral.ai/assets/af26a11d-0793-439f-a06e-7694b24b8270", + "image_url": "https://cms.mistral.ai/assets/a64b3821-3a4c-4d4d-b718-d653f3eb7a5e.png?", }, ] ) diff --git 
a/examples/async_chat_with_streaming.py b/examples/async_chat_with_streaming.py index 736c47a0..1ef500ae 100755 --- a/examples/async_chat_with_streaming.py +++ b/examples/async_chat_with_streaming.py @@ -20,6 +20,7 @@ async def main(): UserMessage(content="What is the best French cheese?give the best 50") ], ) + assert response async for chunk in response: if chunk.data.choices[0].delta.content is not None: print(chunk.data.choices[0].delta.content, end="") diff --git a/examples/async_conversation_agent.py b/examples/async_conversation_agent.py new file mode 100644 index 00000000..54f002ac --- /dev/null +++ b/examples/async_conversation_agent.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai import Mistral + +MODEL = "mistral-medium-latest" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + agent = client.beta.agents.create( + model=MODEL, + name="WebSearch Agent", + instructions="Use your websearch abilities when answering requests you don't know.", + description="Agent able to fetch new information on the web.", + tools = [{"type": "web_search"}], + ) + + result = await client.beta.conversations.start_async( + agent_id=agent.id, + inputs="Who won the last Champions League?" + ) + + print("All result entries:") + for entry in result.outputs: + print(f"{entry}") + + result = await client.beta.conversations.append_async( + conversation_id=result.conversation_id, + inputs="And what about the previous year?" 
+ ) + + print("All result entries:") + for entry in result.outputs: + print(f"{entry}") + + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_conversation_run.py b/examples/async_conversation_run.py new file mode 100644 index 00000000..9e118037 --- /dev/null +++ b/examples/async_conversation_run.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext +from mistralai.types import BaseModel + +MODEL = "mistral-medium-latest" + + +def math_question_generator(question_num: int): + """Random generator of mathematical question + + Args: + question_num (int): the number of the question that will be returned, should be between 1-100 + """ + return ( + "solve the following differential equation: `y'' + 3y' + 2y = 0`" + if question_num % 2 == 0 + else "solve the following differential equation: `y'' - 4y' + 4y = e^x`" + ) + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + async with RunContext(model=MODEL, output_format=MathDemonstration) as run_ctx: + # register a new function that can be executed on the client side + run_ctx.register_func(math_question_generator) + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + instructions="Use the code interpreter to help you when asked mathematical questions.", + inputs=[ + {"role": "user", "content": "hey"}, + {"role": "assistant", "content": "hello"}, + {"role": "user", "content": "Request a math question and answer it."}, + ], + tools=[{"type": "code_interpreter"}], + ) + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff 
--git a/examples/async_conversation_run_mcp.py b/examples/async_conversation_run_mcp.py new file mode 100644 index 00000000..0e373715 --- /dev/null +++ b/examples/async_conversation_run_mcp.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +import asyncio +import os +import random + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext +from mcp import StdioServerParameters +from mistralai.extra.mcp.stdio import ( + MCPClientSTDIO, +) +from pathlib import Path + +from mistralai.types import BaseModel + +cwd = Path(__file__).parent +MODEL = "mistral-medium-latest" + + +async def main() -> None: + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + # Create a mcp server has a tool to return the weather based on the location + server_params = StdioServerParameters( + command="python", + args=[str((cwd / "mcp_servers/stdio_server.py").resolve())], + env=None, + ) + + weather_agent = client.beta.agents.create( + model=MODEL, + name="weather teller", + instructions="You are able to tell the weather.", + description="", + ) + + class WeatherResult(BaseModel): + user: str + location: str + temperature: float + + async with RunContext( + agent_id=weather_agent.id, + output_format=WeatherResult, + continue_on_fn_error=True, + ) as run_ctx: + # Add location function to the run context + @run_ctx.register_func + def get_location(name: str) -> str: + """function to get location of a user. + + Args: + name: name of the user. 
+ """ + return random.choice(["New York", "London", "Paris", "Tokyo", "Sydney"]) + + # Add mcp client to the run context + mcp_client = MCPClientSTDIO(stdio_params=server_params) + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs="Tell me the weather in John's location currently.", + ) + + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print() + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_conversation_run_mcp_remote.py b/examples/async_conversation_run_mcp_remote.py new file mode 100644 index 00000000..7b2f46a6 --- /dev/null +++ b/examples/async_conversation_run_mcp_remote.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext + +from mistralai.extra.mcp.sse import ( + MCPClientSSE, + SSEServerParams, +) +from pathlib import Path + +cwd = Path(__file__).parent +MODEL = "mistral-medium-latest" + +# Use an official remote mcp server +# you can find some at: +# - https://mcpservers.org/remote-mcp-servers +# this one does not require auth: https://remote.mcpservers.org/edgeone-pages/mcp + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + server_url = "https://mcp.semgrep.ai/sse" + mcp_client = MCPClientSSE(sse_params=SSEServerParams(url=server_url, timeout=100)) + + async with RunContext( + model=MODEL, + ) as run_ctx: + # Add mcp client to the run context + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs="Can you write a hello_world.py and check for security vulnerabilities", + ) + + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print() + print(f"Final 
Response: {run_result.output_as_text}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_conversation_run_mcp_remote_auth.py b/examples/async_conversation_run_mcp_remote_auth.py new file mode 100644 index 00000000..f69d8096 --- /dev/null +++ b/examples/async_conversation_run_mcp_remote_auth.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python +import asyncio +from http.server import BaseHTTPRequestHandler, HTTPServer +import os +import threading +import webbrowser + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext + +from mistralai.extra.mcp.sse import ( + MCPClientSSE, + SSEServerParams, +) +from mistralai.extra.mcp.auth import build_oauth_params + +MODEL = "mistral-medium-latest" + +CALLBACK_PORT = 16010 + + +# Use an official remote mcp server +# you can find some at: +# - https://mcpservers.org/remote-mcp-servers +# - https://support.anthropic.com/en/articles/11176164-pre-built-integrations-using-remote-mcp +# this one has auth: https://mcp.linear.app/sse + + +def run_callback_server(callback_func): + auth_response: dict = {"url": ""} + + class OAuthCallbackHandler(BaseHTTPRequestHandler): + server_version = "HTTP" + code = None + + def do_GET(self): + if "/callback" in self.path: + try: + auth_response["url"] = self.path + self.send_response(200) + self.send_header("Content-type", "text/html") + self.end_headers() + callback_func() + response_html = "
<html><body><p>You may now close this window.</p></body></html>
" + self.wfile.write(response_html.encode()) + threading.Thread(target=httpd.shutdown).start() + except Exception: + self.send_response(500) + self.end_headers() + + server_address = ("localhost", CALLBACK_PORT) + httpd = HTTPServer(server_address, OAuthCallbackHandler) + threading.Thread(target=httpd.serve_forever).start() + redirect_url = f"http://localhost:{CALLBACK_PORT}/oauth/callback" + return httpd, redirect_url, auth_response + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + server_url = "https://mcp.linear.app/sse" + + # set-up the client + mcp_client = MCPClientSSE( + sse_params=SSEServerParams( + url=server_url, + ) + ) + + callback_event = asyncio.Event() + event_loop = asyncio.get_event_loop() + + # check if auth is required + if await mcp_client.requires_auth(): + # let's login + httpd, redirect_url, auth_response = run_callback_server( + callback_func=lambda: event_loop.call_soon_threadsafe(callback_event.set) + ) + try: + # First create the required oauth config, this means fetching the server metadata and registering a client + oauth_params = await build_oauth_params( + mcp_client.base_url, redirect_url=redirect_url + ) + mcp_client.set_oauth_params(oauth_params=oauth_params) + login_url, state = await mcp_client.get_auth_url_and_state(redirect_url) + + # The oauth params like client_id, client_secret would generally be saved in some persistent storage. + # The oauth state and token would be saved in a user session. 
+ + # wait for the user to complete the authentication process + print("Please go to this URL and authorize the application:", login_url) + webbrowser.open(login_url, new=2) + await callback_event.wait() + + # in a real app this would be your oauth2 callback route you would get the code from the query params, + # verify the state, and then get the token + # Here we recreate a new client with the saved params which and exchange the code for a token + mcp_client = MCPClientSSE( + sse_params=SSEServerParams( + url=server_url, + ), + oauth_params=oauth_params, + ) + + token = await mcp_client.get_token_from_auth_response( + auth_response["url"], redirect_url=redirect_url, state=state + ) + mcp_client.set_auth_token(token) + + except Exception as e: + print(f"Error during authentication: {e}") + finally: + httpd.shutdown() + httpd.server_close() + + # Now it's possible to make a query to the mcp server as we would do without authentication + async with RunContext( + model=MODEL, + ) as run_ctx: + # Add mcp client to the run context + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs="Tell me which projects do I have in my workspace?", + ) + + print(f"Final Response: {run_result.output_as_text}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_conversation_run_stream.py b/examples/async_conversation_run_stream.py new file mode 100644 index 00000000..1e6ad87b --- /dev/null +++ b/examples/async_conversation_run_stream.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +import asyncio +import os +import random + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext +from mcp import StdioServerParameters +from mistralai.extra.mcp.stdio import MCPClientSTDIO +from pathlib import Path + +from mistralai.extra.run.result import RunResult +from mistralai.types import BaseModel + +cwd = Path(__file__).parent +MODEL = 
"mistral-medium-latest" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + # Create a mcp server has a tool to return the weather based on the location + server_params = StdioServerParameters( + command="python", + args=[str((cwd / "mcp_servers/stdio_server.py").resolve())], + env=None, + ) + + weather_agent = client.beta.agents.create( + model=MODEL, + name="weather teller", + instructions="You are able to tell the weather.", + description="", + ) + + class WeatherResult(BaseModel): + user: str + location: str + temperature: float + + async with RunContext( + agent_id=weather_agent.id, + output_format=WeatherResult, + ) as run_ctx: + # Add location function to the run context + @run_ctx.register_func + def get_location(name: str) -> str: + """function to get location of a user. + + Args: + name: name of the user. + """ + return random.choice(["New York", "London", "Paris", "Tokyo", "Sydney"]) + + # Add mcp client to the run context + mcp_client = MCPClientSTDIO(stdio_params=server_params) + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + events = await client.beta.conversations.run_stream_async( + run_ctx=run_ctx, + inputs="Tell me the weather in John's location currently.", + ) + + run_result = None + async for event in events: + if isinstance(event, RunResult): + run_result = event + else: + print(event) + + if not run_result: + raise RuntimeError("not run result found") + + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_jobs_chat.py b/examples/async_jobs_chat.py index 84327b32..80e598c7 100644 --- a/examples/async_jobs_chat.py +++ b/examples/async_jobs_chat.py @@ -1,7 +1,9 @@ #!/usr/bin/env python - import asyncio +import json import os +import random +from pathlib import Path from mistralai import Mistral from 
# Seconds to wait between successive fine-tuning job status polls.
POLLING_INTERVAL = 10

cwd = Path(__file__).parent

# Pool of user questions for the synthetic fine-tuning dataset.
user_contents = [
    "How far is the Moon from Earth?",
    "What's the largest ocean on Earth?",
    "How many continents are there?",
    "What's the powerhouse of the cell?",
    "What's the speed of light?",
    "Can you solve a Rubik's Cube?",
    "What is the tallest mountain in the world?",
    "Who painted the Mona Lisa?",
]

# Sarcastic assistant replies, index-aligned with user_contents.
assistant_contents = [
    "Around 384,400 kilometers. Give or take a few, like that really matters.",
    "The Pacific Ocean. You know, the one that covers more than 60 million square miles. No big deal.",
    "There are seven continents. I hope that wasn't too hard to count.",
    "The mitochondria. Remember that from high school biology?",
    "Approximately 299,792 kilometers per second. You know, faster than your internet speed.",
    "I could if I had hands. What's your excuse?",
    "Mount Everest, standing at 29,029 feet. You know, just a little hill.",
    "Leonardo da Vinci. Just another guy who liked to doodle.",
]

system_message = "Marv is a factual chatbot that is also sarcastic"


def create_validation_file() -> bytes:
    """Build a one-record validation sample, JSON-encoded as UTF-8 bytes.

    The record holds a single user/assistant exchange plus a random
    "temperature" field, mirroring the training-file record schema.
    """
    record = {
        "messages": [
            {
                "role": "user",
                "content": "How long does it take to travel around the Earth?",
            },
            {
                "role": "assistant",
                "content": "Around 24 hours if you're the Earth itself. For you, depends on your mode of transportation.",
            },
        ],
        "temperature": random.random(),
    }
    return json.dumps(record).encode()
"SUCCESS": + if created_job.status == "FAILED": print("Job failed") raise Exception(f"Job failed with {created_job.status}") + print(created_job) # Chat with model response = await client.chat.complete_async( diff --git a/examples/async_structured_outputs.py b/examples/async_structured_outputs.py index 560934e9..a512d38f 100644 --- a/examples/async_structured_outputs.py +++ b/examples/async_structured_outputs.py @@ -5,7 +5,6 @@ from pydantic import BaseModel from mistralai import Mistral -from typing import List async def main(): @@ -17,7 +16,7 @@ class Explanation(BaseModel): output: str class MathDemonstration(BaseModel): - steps: List[Explanation] + steps: list[Explanation] final_answer: str chat_response = await client.chat.parse_async( diff --git a/examples/azure/az_chat_no_streaming.py.py b/examples/azure/az_chat_no_streaming.py.py new file mode 100644 index 00000000..485b594e --- /dev/null +++ b/examples/azure/az_chat_no_streaming.py.py @@ -0,0 +1,16 @@ +import os + +from mistralai_azure import MistralAzure + +client = MistralAzure( + azure_api_key=os.environ["AZURE_API_KEY"], + azure_endpoint=os.environ["AZURE_ENDPOINT"], +) + +res = client.chat.complete( + messages=[ + {"role": "user", "content": "What is the capital of France?"}, + ], + # you don't need model as it will always be "azureai" +) +print(res.choices[0].message.content) diff --git a/examples/chat_with_streaming.py b/examples/chat_with_streaming.py index 19d48a15..66b167f1 100755 --- a/examples/chat_with_streaming.py +++ b/examples/chat_with_streaming.py @@ -16,7 +16,6 @@ def main(): model=model, messages=[UserMessage(content="What is the best French cheese?")], ): - print(chunk.data.choices[0].delta.content, end="") diff --git a/examples/function_calling.py b/examples/function_calling.py index e7eba594..aba7d671 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -90,35 +90,46 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: messages = 
[UserMessage(content="What's the status of my transaction?")] -response = client.chat.complete(model=model, messages=messages, tools=tools) +response = client.chat.complete( + model=model, messages=messages, tools=tools, temperature=0 +) print(response.choices[0].message.content) messages.append(AssistantMessage(content=response.choices[0].message.content)) messages.append(UserMessage(content="My transaction ID is T1001.")) -response = client.chat.complete(model=model, messages=messages, tools=tools) -messages.append(response.choices[0].message) +response = client.chat.complete( + model=model, messages=messages, tools=tools, temperature=0 +) -for tool_call in response.choices[0].message.tool_calls: +tool_call = response.choices[0].message.tool_calls[0] +function_name = tool_call.function.name +function_params = json.loads(tool_call.function.arguments) - function_name = tool_call.function.name - function_params = json.loads(tool_call.function.arguments) +print( + f"calling function_name: {function_name}, with function_params: {function_params}" +) - print( - f"calling function_name: {function_name}, with function_params: {function_params}" - ) +function_result = names_to_functions[function_name](**function_params) - function_result =names_to_functions[function_name](**function_params) - messages.append( - ToolMessage( - name=function_name, - content=function_result, - tool_call_id=tool_call.id, - ) +messages.append( + AssistantMessage( + content=response.choices[0].message.content, + tool_calls=response.choices[0].message.tool_calls, + ) +) +messages.append( + ToolMessage( + name=function_name, + content=function_result, + tool_call_id=tool_call.id, ) +) print(messages) -response = client.chat.complete(model=model, messages=messages, tools=tools) +response = client.chat.complete( + model=model, messages=messages, tools=tools, temperature=0 +) print(f"{response.choices[0].message.content}") diff --git a/examples/gcp/gcp_async_chat_no_streaming.py 
#!/usr/bin/env python
"""Example: one-shot (non-streaming) async chat completion via Google Cloud."""

import asyncio
import os

from mistralai_gcp import MistralGoogleCloud
from mistralai_gcp.models.usermessage import UserMessage


async def main():
    model = "mistral-large-2407"

    # Project id comes from the environment; auth is handled by GCP defaults.
    gcp_client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"])

    response = await gcp_client.chat.complete_async(
        model=model,
        messages=[UserMessage(content="What is the best French cheese?")],
    )

    print(response.choices[0].message.content)


if __name__ == "__main__":
    asyncio.run(main())
#!/usr/bin/env python
"""Example: structured outputs via chat.parse and chat.parse_stream."""

import os

from pydantic import BaseModel

from mistralai import Mistral


def main():
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

    # Schema the model's answer is parsed into.
    class Explanation(BaseModel):
        explanation: str
        output: str

    class MathDemonstration(BaseModel):
        steps: list[Explanation]
        final_answer: str

    # Shared prompt pieces for both the blocking and the streaming call.
    system_prompt = "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning."
    question = "How can I solve 8x + 7 = -23"

    print("Using the .parse method to parse the response into a Pydantic model:\n")
    parsed_response = client.chat.parse(
        model="mistral-large-latest",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": question},
        ],
        response_format=MathDemonstration,
    )
    print(parsed_response.choices[0].message.parsed)

    # Or with the streaming API
    print(
        "\nUsing the .parse_stream method to stream back the response into a JSON Schema:\n"
    )
    with client.chat.parse_stream(
        model="mistral-large-latest",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": question},
        ],
        response_format=MathDemonstration,
    ) as stream:
        for chunk in stream:
            print(chunk.data.choices[0].delta.content, end="")


if __name__ == "__main__":
    main()
+optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", 
hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + 
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] +markers = {main = "extra == \"agents\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\""} + +[package.dependencies] +pycparser = "*" + [[package]] name = "charset-normalizer" version = "3.4.0" @@ -191,18 +288,146 @@ files = [ {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] +[[package]] +name = "click" +version = "8.2.1" +description = "Composable command line interface toolkit" +optional = true +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, + {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["dev"] -markers = "sys_platform == \"win32\"" +groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "extra == \"agents\"", dev = "sys_platform == \"win32\""} + +[[package]] +name = "cryptography" +version = "43.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = 
"sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", 
hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, +] +markers = {main = "python_version < \"3.11\" and extra == \"agents\"", dev = "python_version < \"3.11\""} + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "cryptography" +version = "45.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+optional = false +python-versions = "!=3.9.0,!=3.9.1,>=3.7" +groups = ["main", "dev"] +files = [ + {file = "cryptography-45.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:7573d9eebaeceeb55285205dbbb8753ac1e962af3d9640791d12b36864065e71"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d377dde61c5d67eb4311eace661c3efda46c62113ff56bf05e2d679e02aebb5b"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fae1e637f527750811588e4582988932c222f8251f7b7ea93739acb624e1487f"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ca932e11218bcc9ef812aa497cdf669484870ecbcf2d99b765d6c27a86000942"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af3f92b1dc25621f5fad065288a44ac790c5798e986a34d393ab27d2b27fcff9"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2f8f8f0b73b885ddd7f3d8c2b2234a7d3ba49002b0223f58cfde1bedd9563c56"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9cc80ce69032ffa528b5e16d217fa4d8d4bb7d6ba8659c1b4d74a1b0f4235fca"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c824c9281cb628015bfc3c59335163d4ca0540d49de4582d6c2637312907e4b1"}, + {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5833bb4355cb377ebd880457663a972cd044e7f49585aee39245c0d592904578"}, + {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9bb5bf55dcb69f7067d80354d0a348368da907345a2c448b0babc4215ccd3497"}, + {file = "cryptography-45.0.3-cp311-abi3-win32.whl", hash = "sha256:3ad69eeb92a9de9421e1f6685e85a10fbcfb75c833b42cc9bc2ba9fb00da4710"}, + {file = "cryptography-45.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:97787952246a77d77934d41b62fb1b6f3581d83f71b44796a4158d93b8f5c490"}, + {file = 
"cryptography-45.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:c92519d242703b675ccefd0f0562eb45e74d438e001f8ab52d628e885751fb06"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5edcb90da1843df85292ef3a313513766a78fbbb83f584a5a58fb001a5a9d57"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38deed72285c7ed699864f964a3f4cf11ab3fb38e8d39cfcd96710cd2b5bb716"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5555365a50efe1f486eed6ac7062c33b97ccef409f5970a0b6f205a7cfab59c8"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9e4253ed8f5948a3589b3caee7ad9a5bf218ffd16869c516535325fece163dcc"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cfd84777b4b6684955ce86156cfb5e08d75e80dc2585e10d69e47f014f0a5342"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:a2b56de3417fd5f48773ad8e91abaa700b678dc7fe1e0c757e1ae340779acf7b"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:57a6500d459e8035e813bd8b51b671977fb149a8c95ed814989da682314d0782"}, + {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f22af3c78abfbc7cbcdf2c55d23c3e022e1a462ee2481011d518c7fb9c9f3d65"}, + {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:232954730c362638544758a8160c4ee1b832dc011d2c41a306ad8f7cccc5bb0b"}, + {file = "cryptography-45.0.3-cp37-abi3-win32.whl", hash = "sha256:cb6ab89421bc90e0422aca911c69044c2912fc3debb19bb3c1bfe28ee3dff6ab"}, + {file = "cryptography-45.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:d54ae41e6bd70ea23707843021c778f151ca258081586f0cfa31d936ae43d1b2"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:ed43d396f42028c1f47b5fec012e9e12631266e3825e95c00e3cf94d472dac49"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:fed5aaca1750e46db870874c9c273cd5182a9e9deb16f06f7bdffdb5c2bde4b9"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:00094838ecc7c6594171e8c8a9166124c1197b074cfca23645cee573910d76bc"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:92d5f428c1a0439b2040435a1d6bc1b26ebf0af88b093c3628913dd464d13fa1"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:ec64ee375b5aaa354b2b273c921144a660a511f9df8785e6d1c942967106438e"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:71320fbefd05454ef2d457c481ba9a5b0e540f3753354fff6f780927c25d19b0"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:edd6d51869beb7f0d472e902ef231a9b7689508e83880ea16ca3311a00bf5ce7"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:555e5e2d3a53b4fabeca32835878b2818b3f23966a4efb0d566689777c5a12c8"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:25286aacb947286620a31f78f2ed1a32cded7be5d8b729ba3fb2c988457639e4"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:050ce5209d5072472971e6efbfc8ec5a8f9a841de5a4db0ebd9c2e392cb81972"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dc10ec1e9f21f33420cc05214989544727e776286c1c16697178978327b95c9c"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:9eda14f049d7f09c2e8fb411dda17dd6b16a3c76a1de5e249188a32aeb92de19"}, + {file = "cryptography-45.0.3.tar.gz", hash = "sha256:ec21313dd335c51d7877baf2972569f40a4291b76a0ce51391523ae358d05899"}, +] +markers = {main = "python_version >= \"3.11\" 
and extra == \"agents\"", dev = "python_version >= \"3.11\""} + +[package.dependencies] +cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""] +docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] +nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""] +pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] +sdist = ["build (>=1.0.0)"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi (>=2024)", "cryptography-vectors (==45.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test-randomorder = ["pytest-randomly"] [[package]] name = "dill" @@ -276,6 +501,22 @@ pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] +[[package]] +name = "griffe" +version = "1.7.3" +description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
+optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"agents\"" +files = [ + {file = "griffe-1.7.3-py3-none-any.whl", hash = "sha256:c6b3ee30c2f0f17f30bcdef5068d6ab7a2a4f1b8bf1a3e74b56fffd21e1c5f75"}, + {file = "griffe-1.7.3.tar.gz", hash = "sha256:52ee893c6a3a968b639ace8015bec9d36594961e156e23315c8e8e51401fa50b"}, +] + +[package.dependencies] +colorama = ">=0.4" + [[package]] name = "h11" version = "0.14.0" @@ -335,6 +576,19 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-sse" +version = "0.4.0" +description = "Consume Server-Sent Event (SSE) messages with HTTPX." +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, + {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, +] + [[package]] name = "idna" version = "3.10" @@ -389,6 +643,35 @@ files = [ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, ] +[[package]] +name = "mcp" +version = "1.9.1" +description = "Model Context Protocol SDK" +optional = true +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "mcp-1.9.1-py3-none-any.whl", hash = "sha256:2900ded8ffafc3c8a7bfcfe8bc5204037e988e753ec398f371663e6a06ecd9a9"}, + {file = "mcp-1.9.1.tar.gz", hash = "sha256:19879cd6dde3d763297617242888c2f695a95dfa854386a6a68676a646ce75e4"}, +] + +[package.dependencies] +anyio = ">=4.5" +httpx = ">=0.27" +httpx-sse = ">=0.4" +pydantic = ">=2.7.2,<3.0.0" +pydantic-settings = ">=2.5.2" +python-multipart = ">=0.0.9" +sse-starlette = ">=1.6.1" +starlette = ">=0.27" +uvicorn = {version = ">=0.23.1", markers = "sys_platform 
!= \"emscripten\""} + +[package.extras] +cli = ["python-dotenv (>=1.0.0)", "typer (>=0.12.4)"] +rich = ["rich (>=13.9.4)"] +ws = ["websockets (>=15.0.1)"] + [[package]] name = "mypy" version = "1.15.0" @@ -541,6 +824,19 @@ files = [ [package.dependencies] pyasn1 = ">=0.4.6,<0.7.0" +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] +markers = {main = "extra == \"agents\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\""} + [[package]] name = "pydantic" version = "2.10.6" @@ -675,6 +971,31 @@ files = [ [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pydantic-settings" +version = "2.9.1" +description = "Settings management using Pydantic" +optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"}, + {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" +typing-inspection = ">=0.4.0" + +[package.extras] +aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] +gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + [[package]] name = "pylint" version = "3.2.3" @@ -784,6 +1105,35 @@ files = [ 
[package.dependencies] six = ">=1.5" +[[package]] +name = "python-dotenv" +version = "1.1.0" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, + {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "python-multipart" +version = "0.0.20" +description = "A streaming multipart parser for Python" +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"}, + {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"}, +] + [[package]] name = "requests" version = "2.32.3" @@ -874,6 +1224,46 @@ files = [ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] +[[package]] +name = "sse-starlette" +version = "2.1.3" +description = "SSE plugin for Starlette" +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "sse_starlette-2.1.3-py3-none-any.whl", hash = "sha256:8ec846438b4665b9e8c560fcdea6bc8081a3abf7942faa95e5a744999d219772"}, + {file = "sse_starlette-2.1.3.tar.gz", hash = "sha256:9cd27eb35319e1414e3d2558ee7414487f9529ce3b3cf9b21434fd110e017169"}, +] + +[package.dependencies] +anyio = "*" +starlette = "*" +uvicorn = "*" + +[package.extras] +examples = ["fastapi"] + +[[package]] +name = "starlette" 
+version = "0.46.2" +description = "The little ASGI library that shines." +optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35"}, + {file = "starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5"}, +] + +[package.dependencies] +anyio = ">=3.6.2,<5" + +[package.extras] +full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] + [[package]] name = "tomli" version = "2.2.1" @@ -929,6 +1319,21 @@ files = [ {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, ] +[[package]] +name = "types-authlib" +version = "1.5.0.20250516" +description = "Typing stubs for Authlib" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_authlib-1.5.0.20250516-py3-none-any.whl", hash = "sha256:c553659ba00b7e5f98d1bc183a47224a882de5d32c07917b1587a6a22ddd2583"}, + {file = "types_authlib-1.5.0.20250516.tar.gz", hash = "sha256:6d11b46622c4c338087d059e9036887408c788cf254f0fb11ff69f2a85ca7231"}, +] + +[package.dependencies] +cryptography = "*" + [[package]] name = "types-python-dateutil" version = "2.9.0.20241003" @@ -987,10 +1392,32 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "uvicorn" +version = "0.34.2" +description = "The lightning-fast ASGI server." 
+optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"}, + {file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[package.extras] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] + [extras] +agents = ["authlib", "griffe", "mcp"] gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" python-versions = ">=3.9" -content-hash = "54b1df325389d0398ad0ecb64575548aff40b97d220b0002780d48bee3c90846" +content-hash = "f111068ee90dcada908f5064a1ed67f027a728ababa2bb6bd9e6957957fc5c6c" diff --git a/pyproject.toml b/pyproject.toml index f429e1e4..1050042e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.7.1" +version = "1.8.0" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" @@ -34,6 +34,7 @@ pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" types-python-dateutil = "^2.9.0.20240316" +types-authlib = "^1.5.0.20250516" [tool.poetry.group.lint.dependencies] ruff = "^0.11.10" @@ -46,6 +47,11 @@ gcp = [ "google-auth >=2.27.0", "requests >=2.32.3" ] +agents = [ + "mcp >=1.0,<2.0; python_version >= '3.10'", + "griffe >=1.7.3,<2.0", + "authlib >=1.5.2,<2.0", +] [build-system] requires = ["poetry-core"] diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index dca05562..163bb3a6 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -7,19 +7,26 @@ ERRORS=0 echo "Running mypy..." # TODO: Uncomment once the examples are fixed # poetry run mypy examples/ || ERRORS=1 +echo "-> running on extra" poetry run mypy src/mistralai/extra/ || ERRORS=1 +echo "-> running on hooks" poetry run mypy src/mistralai/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 echo "Running pyright..." # TODO: Uncomment once the examples are fixed # poetry run pyright examples/ || ERRORS=1 +echo "-> running on extra" poetry run pyright src/mistralai/extra/ || ERRORS=1 +echo "-> running on hooks" poetry run pyright src/mistralai/_hooks/ || ERRORS=1 echo "Running ruff..." 
+echo "-> running on examples" poetry run ruff check examples/ || ERRORS=1 +echo "-> running on extra" poetry run ruff check src/mistralai/extra/ || ERRORS=1 +echo "-> running on hooks" poetry run ruff check src/mistralai/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh new file mode 100755 index 00000000..a48d34af --- /dev/null +++ b/scripts/run_examples.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# List of files to exclude +exclude_files=( + "examples/chatbot_with_streaming.py" + "examples/async_conversation_run_mcp_remote_auth.py" +) + +# Check if the first argument is "no-extra-dep" then remove all the files that require the extra dependencies +if [ "$1" = "--no-extra-dep" ]; then + # Add more files to the exclude list + exclude_files+=( + "examples/async_conversation_run_mcp_remote.py" + "examples/async_conversation_run_mcp.py" + "examples/async_conversation_run_stream.py" + "examples/async_conversation_run.py" + ) +fi + +failed=0 + +for file in examples/*.py; do + # Check if the file is not in the exclude list + if [ -f "$file" ] && [[ ! 
" ${exclude_files[@]} " =~ " $file " ]]; then + echo "Running $file" + # Run the script and capture the exit status + if python3 "$file" > /dev/null; then + echo "Success" + else + echo "Failed" + failed=1 + fi + else + echo "Skipped $file" + fi +done + +# If one of the example scripts failed, then exit +if [ $failed -ne 0 ]; then + exit 1 +fi diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index ade7e11c..de6b8db8 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.7.1" +__version__: str = "1.8.0" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.7.1 2.548.6 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.8.0 2.548.6 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py new file mode 100644 index 00000000..6858b0a8 --- /dev/null +++ b/src/mistralai/beta.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.conversations import Conversations +from mistralai.mistral_agents import MistralAgents + + +class Beta(BaseSDK): + conversations: Conversations + agents: MistralAgents + + def __init__(self, sdk_config: SDKConfiguration) -> None: + BaseSDK.__init__(self, sdk_config) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.conversations = Conversations(self.sdk_configuration) + self.agents = MistralAgents(self.sdk_configuration) diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py new file mode 100644 index 00000000..6e4b37ee --- /dev/null +++ b/src/mistralai/conversations.py @@ -0,0 +1,2657 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import eventstreaming, get_security_from_env +from typing import Any, List, Mapping, Optional, Union + +# region imports +import typing +from typing import AsyncGenerator +import logging +from collections import defaultdict + +from mistralai.models import ( + ResponseStartedEvent, + ConversationEventsData, + InputEntries, +) +from mistralai.extra.run.result import ( + RunResult, + RunResultEvents, + FunctionResultEvent, + reconstitue_entries, +) +from mistralai.extra.run.utils import run_requirements + +logger = logging.getLogger(__name__) + +if typing.TYPE_CHECKING: + from mistralai.extra.run.context import RunContext + +# endregion imports + + + +class Conversations(BaseSDK): + # region sdk-class-body + # Custom run code allowing client side execution of code + + @run_requirements + async def run_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> RunResult: + """Run a conversation with the given inputs and context. 
+ + The execution of a run will only stop when no required local execution can be done.""" + from mistralai.beta import Beta + from mistralai.extra.run.context import _validate_run + from mistralai.extra.run.tools import get_function_calls + + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + while True: + if run_ctx.conversation_id is None: + res = await self.start_async( + inputs=input_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, + ) + run_result.conversation_id = res.conversation_id + run_ctx.conversation_id = res.conversation_id + logger.info( + f"Started Run with conversation with id {res.conversation_id}" + ) + else: + res = await self.append_async( + conversation_id=run_ctx.conversation_id, + inputs=input_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + run_ctx.request_count += 1 + run_result.output_entries.extend(res.outputs) + fcalls = get_function_calls(res.outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + else: + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + input_entries = typing.cast(list[InputEntries], fresults) + return run_result + + @run_requirements + async def run_stream_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: 
OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: + """Similar to `run_async` but returns a generator which streams events. + + The last streamed object is the RunResult object which summarises what happened in the run.""" + from mistralai.beta import Beta + from mistralai.extra.run.context import _validate_run + from mistralai.extra.run.tools import get_function_calls + + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + async def run_generator() -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: + current_entries = input_entries + while True: + received_event_tracker: defaultdict[ + int, list[ConversationEventsData] + ] = defaultdict(list) + if run_ctx.conversation_id is None: + res = await self.start_stream_async( + inputs=current_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, + ) + else: + res = await self.append_stream_async( + conversation_id=run_ctx.conversation_id, + inputs=current_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + async for event in res: + if ( + isinstance(event.data, ResponseStartedEvent) + and run_ctx.conversation_id is None + ): + run_result.conversation_id = event.data.conversation_id + run_ctx.conversation_id = event.data.conversation_id + logger.info( + f"Started Run with conversation with id {run_ctx.conversation_id}" + ) + if ( + output_index := getattr(event.data, "output_index", None) + ) is not None: + received_event_tracker[output_index].append(event.data) + yield typing.cast(RunResultEvents, event) + run_ctx.request_count += 1 + 
outputs = reconstitue_entries(received_event_tracker) + run_result.output_entries.extend(outputs) + fcalls = get_function_calls(outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + else: + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + for fresult in fresults: + yield RunResultEvents( + event="function.result", + data=FunctionResultEvent( + type="function.result", + result=fresult.result, + tool_call_id=fresult.tool_call_id, + ), + ) + current_entries = typing.cast(list[InputEntries], fresults) + yield run_result + + return run_generator() + + # endregion sdk-class-body + + def start( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. 
+ + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: + :param completion_args: + :param name: + :param description: + :param agent_id: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + agent_id=agent_id, + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + 
retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def start_async( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: 
OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: + :param completion_args: + :param name: + :param description: + :param agent_id: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests.
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + agent_id=agent_id, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.ResponseBody]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, List[models.ResponseBody]) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.ResponseBody]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, List[models.ResponseBody]) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + 
if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, 
http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def append( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def append_async( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. 
+ :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + 
hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get_history( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. + + Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. 
+ + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationHistory) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_history_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. + + Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. + + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationHistory) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) 
def get_messages(
    self,
    *,
    conversation_id: str,
    retries: OptionalNullable[utils.RetryConfig] = UNSET,
    server_url: Optional[str] = None,
    timeout_ms: Optional[int] = None,
    http_headers: Optional[Mapping[str, str]] = None,
) -> models.ConversationMessages:
    r"""Retrieve all messages in a conversation.

    Given a conversation_id retrieve all the messages belonging to that
    conversation. This is similar to retrieving all entries except we filter
    the messages only.

    :param conversation_id: ID of the conversation whose messages are fetched.
    :param retries: Override the default retry configuration for this method
    :param server_url: Override the default server URL for this method
    :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
    :param http_headers: Additional headers to set or replace on requests.
    """
    base_url = None
    url_variables = None
    # Fall back to the SDK-wide timeout when no per-call override is given.
    if timeout_ms is None:
        timeout_ms = self.sdk_configuration.timeout_ms

    # A per-call server_url overrides the configured base URL.
    if server_url is not None:
        base_url = server_url
    else:
        base_url = self._get_url(base_url, url_variables)

    request = models.AgentsAPIV1ConversationsMessagesRequest(
        conversation_id=conversation_id,
    )

    req = self._build_request(
        method="GET",
        path="/v1/conversations/{conversation_id}/messages",
        base_url=base_url,
        url_variables=url_variables,
        request=request,
        request_body_required=False,
        request_has_path_params=True,
        request_has_query_params=True,
        user_agent_header="user-agent",
        accept_header_value="application/json",
        http_headers=http_headers,
        security=self.sdk_configuration.security,
        timeout_ms=timeout_ms,
    )

    # Per-call retries take precedence; otherwise use the SDK-wide config.
    if retries == UNSET:
        if self.sdk_configuration.retry_config is not UNSET:
            retries = self.sdk_configuration.retry_config

    retry_config = None
    if isinstance(retries, utils.RetryConfig):
        # Only these transient statuses are retried.
        retry_config = (retries, ["429", "500", "502", "503", "504"])

    http_res = self.do_request(
        hook_ctx=HookContext(
            base_url=base_url or "",
            operation_id="agents_api_v1_conversations_messages",
            oauth2_scopes=[],
            security_source=get_security_from_env(
                self.sdk_configuration.security, models.Security
            ),
        ),
        request=req,
        error_status_codes=["422", "4XX", "5XX"],
        retry_config=retry_config,
    )

    # Dispatch on status/content type: 200 -> typed result, 422 -> structured
    # validation error, other 4XX/5XX -> SDKError with the raw body text.
    response_data: Any = None
    if utils.match_response(http_res, "200", "application/json"):
        return utils.unmarshal_json(http_res.text, models.ConversationMessages)
    if utils.match_response(http_res, "422", "application/json"):
        response_data = utils.unmarshal_json(
            http_res.text, models.HTTPValidationErrorData
        )
        raise models.HTTPValidationError(data=response_data)
    if utils.match_response(http_res, "4XX", "*"):
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            "API error occurred", http_res.status_code, http_res_text, http_res
        )
    if utils.match_response(http_res, "5XX", "*"):
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            "API error occurred", http_res.status_code, http_res_text, http_res
        )

    # Anything else (unexpected status or content type) is a generic error.
    content_type = http_res.headers.get("Content-Type")
    http_res_text = utils.stream_to_text(http_res)
    raise models.SDKError(
        f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
        http_res.status_code,
        http_res_text,
        http_res,
    )
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsMessagesRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/messages", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_messages", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationMessages) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res 
def restart(
    self,
    *,
    conversation_id: str,
    inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
    from_entry_id: str,
    stream: Optional[bool] = False,
    store: Optional[bool] = True,
    handoff_execution: Optional[
        models.ConversationRestartRequestHandoffExecution
    ] = "server",
    completion_args: Optional[
        Union[models.CompletionArgs, models.CompletionArgsTypedDict]
    ] = None,
    retries: OptionalNullable[utils.RetryConfig] = UNSET,
    server_url: Optional[str] = None,
    timeout_ms: Optional[int] = None,
    http_headers: Optional[Mapping[str, str]] = None,
) -> models.ConversationResponse:
    r"""Restart a conversation starting from a given entry.

    Given a conversation_id and an id, recreate a conversation from this
    point and run completion. A new conversation is returned with the new
    entries returned.

    :param conversation_id: ID of the conversation to restart from.
    :param inputs: New entries to run completion on.
    :param from_entry_id: Entry at which the conversation is recreated.
    :param stream:
    :param store: Whether to store the results into our servers or not.
    :param handoff_execution:
    :param completion_args: White-listed arguments from the completion API
    :param retries: Override the default retry configuration for this method
    :param server_url: Override the default server URL for this method
    :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
    :param http_headers: Additional headers to set or replace on requests.
    """
    base_url = None
    url_variables = None
    # Fall back to the SDK-wide timeout when no per-call override is given.
    if timeout_ms is None:
        timeout_ms = self.sdk_configuration.timeout_ms

    # A per-call server_url overrides the configured base URL.
    if server_url is not None:
        base_url = server_url
    else:
        base_url = self._get_url(base_url, url_variables)

    # The path parameter and the JSON body are wrapped in one request model.
    request = models.AgentsAPIV1ConversationsRestartRequest(
        conversation_id=conversation_id,
        conversation_restart_request=models.ConversationRestartRequest(
            inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
            stream=stream,
            store=store,
            handoff_execution=handoff_execution,
            from_entry_id=from_entry_id,
            completion_args=utils.get_pydantic_model(
                completion_args, Optional[models.CompletionArgs]
            ),
        ),
    )

    req = self._build_request(
        method="POST",
        path="/v1/conversations/{conversation_id}/restart",
        base_url=base_url,
        url_variables=url_variables,
        request=request,
        request_body_required=True,
        request_has_path_params=True,
        request_has_query_params=True,
        user_agent_header="user-agent",
        accept_header_value="application/json",
        http_headers=http_headers,
        security=self.sdk_configuration.security,
        # Serialization is deferred so request hooks can still mutate `request`.
        get_serialized_body=lambda: utils.serialize_request_body(
            request.conversation_restart_request,
            False,
            False,
            "json",
            models.ConversationRestartRequest,
        ),
        timeout_ms=timeout_ms,
    )

    # Per-call retries take precedence; otherwise use the SDK-wide config.
    if retries == UNSET:
        if self.sdk_configuration.retry_config is not UNSET:
            retries = self.sdk_configuration.retry_config

    retry_config = None
    if isinstance(retries, utils.RetryConfig):
        # Only these transient statuses are retried.
        retry_config = (retries, ["429", "500", "502", "503", "504"])

    http_res = self.do_request(
        hook_ctx=HookContext(
            base_url=base_url or "",
            operation_id="agents_api_v1_conversations_restart",
            oauth2_scopes=[],
            security_source=get_security_from_env(
                self.sdk_configuration.security, models.Security
            ),
        ),
        request=req,
        error_status_codes=["422", "4XX", "5XX"],
        retry_config=retry_config,
    )

    # Dispatch on status/content type: 200 -> typed result, 422 -> structured
    # validation error, other 4XX/5XX -> SDKError with the raw body text.
    response_data: Any = None
    if utils.match_response(http_res, "200", "application/json"):
        return utils.unmarshal_json(http_res.text, models.ConversationResponse)
    if utils.match_response(http_res, "422", "application/json"):
        response_data = utils.unmarshal_json(
            http_res.text, models.HTTPValidationErrorData
        )
        raise models.HTTPValidationError(data=response_data)
    if utils.match_response(http_res, "4XX", "*"):
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            "API error occurred", http_res.status_code, http_res_text, http_res
        )
    if utils.match_response(http_res, "5XX", "*"):
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            "API error occurred", http_res.status_code, http_res_text, http_res
        )

    # Anything else (unexpected status or content type) is a generic error.
    content_type = http_res.headers.get("Content-Type")
    http_res_text = utils.stream_to_text(http_res)
    raise models.SDKError(
        f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
        http_res.status_code,
        http_res_text,
        http_res,
    )
+ + :param conversation_id: + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartRequest( + conversation_id=conversation_id, + conversation_restart_request=models.ConversationRestartRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + from_entry_id=from_entry_id, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_request, + False, + False, + "json", + models.ConversationRestartRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = 
def start_stream(
    self,
    *,
    inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
    stream: Optional[bool] = True,
    store: OptionalNullable[bool] = UNSET,
    handoff_execution: OptionalNullable[
        models.ConversationStreamRequestHandoffExecution
    ] = UNSET,
    instructions: OptionalNullable[str] = UNSET,
    tools: OptionalNullable[
        Union[
            List[models.ConversationStreamRequestTools],
            List[models.ConversationStreamRequestToolsTypedDict],
        ]
    ] = UNSET,
    completion_args: OptionalNullable[
        Union[models.CompletionArgs, models.CompletionArgsTypedDict]
    ] = UNSET,
    name: OptionalNullable[str] = UNSET,
    description: OptionalNullable[str] = UNSET,
    agent_id: OptionalNullable[str] = UNSET,
    model: OptionalNullable[str] = UNSET,
    retries: OptionalNullable[utils.RetryConfig] = UNSET,
    server_url: Optional[str] = None,
    timeout_ms: Optional[int] = None,
    http_headers: Optional[Mapping[str, str]] = None,
) -> eventstreaming.EventStream[models.ConversationEvents]:
    r"""Create a conversation and append entries to it.

    Create a new conversation, using a base model or an agent and append
    entries. Completion and tool executions are run and the response is
    appended to the conversation. Use the returned conversation_id to
    continue the conversation.

    :param inputs:
    :param stream:
    :param store:
    :param handoff_execution:
    :param instructions:
    :param tools:
    :param completion_args:
    :param name:
    :param description:
    :param agent_id:
    :param model:
    :param retries: Override the default retry configuration for this method
    :param server_url: Override the default server URL for this method
    :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
    :param http_headers: Additional headers to set or replace on requests.
    """
    base_url = None
    url_variables = None
    # Fall back to the SDK-wide timeout when no per-call override is given.
    if timeout_ms is None:
        timeout_ms = self.sdk_configuration.timeout_ms

    # A per-call server_url overrides the configured base URL.
    if server_url is not None:
        base_url = server_url
    else:
        base_url = self._get_url(base_url, url_variables)

    request = models.ConversationStreamRequest(
        inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
        stream=stream,
        store=store,
        handoff_execution=handoff_execution,
        instructions=instructions,
        tools=utils.get_pydantic_model(
            tools, OptionalNullable[List[models.ConversationStreamRequestTools]]
        ),
        completion_args=utils.get_pydantic_model(
            completion_args, OptionalNullable[models.CompletionArgs]
        ),
        name=name,
        description=description,
        agent_id=agent_id,
        model=model,
    )

    # `#stream` disambiguates the streaming variant of the same REST path.
    req = self._build_request(
        method="POST",
        path="/v1/conversations#stream",
        base_url=base_url,
        url_variables=url_variables,
        request=request,
        request_body_required=True,
        request_has_path_params=False,
        request_has_query_params=True,
        user_agent_header="user-agent",
        accept_header_value="text/event-stream",
        http_headers=http_headers,
        security=self.sdk_configuration.security,
        # Serialization is deferred so request hooks can still mutate `request`.
        get_serialized_body=lambda: utils.serialize_request_body(
            request, False, False, "json", models.ConversationStreamRequest
        ),
        timeout_ms=timeout_ms,
    )

    # Per-call retries take precedence; otherwise use the SDK-wide config.
    if retries == UNSET:
        if self.sdk_configuration.retry_config is not UNSET:
            retries = self.sdk_configuration.retry_config

    retry_config = None
    if isinstance(retries, utils.RetryConfig):
        # Only these transient statuses are retried.
        retry_config = (retries, ["429", "500", "502", "503", "504"])

    # stream=True keeps the response body open for incremental SSE reads.
    http_res = self.do_request(
        hook_ctx=HookContext(
            base_url=base_url or "",
            operation_id="agents_api_v1_conversations_start_stream",
            oauth2_scopes=[],
            security_source=get_security_from_env(
                self.sdk_configuration.security, models.Security
            ),
        ),
        request=req,
        error_status_codes=["422", "4XX", "5XX"],
        stream=True,
        retry_config=retry_config,
    )

    response_data: Any = None
    if utils.match_response(http_res, "200", "text/event-stream"):
        # Each SSE payload is unmarshalled lazily as the caller iterates.
        return eventstreaming.EventStream(
            http_res,
            lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
        )
    if utils.match_response(http_res, "422", "application/json"):
        # Error bodies must be drained from the stream before unmarshalling.
        http_res_text = utils.stream_to_text(http_res)
        response_data = utils.unmarshal_json(
            http_res_text, models.HTTPValidationErrorData
        )
        raise models.HTTPValidationError(data=response_data)
    if utils.match_response(http_res, "4XX", "*"):
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            "API error occurred", http_res.status_code, http_res_text, http_res
        )
    if utils.match_response(http_res, "5XX", "*"):
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            "API error occurred", http_res.status_code, http_res_text, http_res
        )

    # Anything else (unexpected status or content type) is a generic error.
    content_type = http_res.headers.get("Content-Type")
    http_res_text = utils.stream_to_text(http_res)
    raise models.SDKError(
        f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
        http_res.status_code,
        http_res_text,
        http_res,
    )
timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: + :param completion_args: + :param name: + :param description: + :param agent_id: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, OptionalNullable[List[models.ConversationStreamRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + agent_id=agent_id, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + 
user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise 
def append_stream(
    self,
    *,
    conversation_id: str,
    inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
    stream: Optional[bool] = True,
    store: Optional[bool] = True,
    handoff_execution: Optional[
        models.ConversationAppendStreamRequestHandoffExecution
    ] = "server",
    completion_args: Optional[
        Union[models.CompletionArgs, models.CompletionArgsTypedDict]
    ] = None,
    retries: OptionalNullable[utils.RetryConfig] = UNSET,
    server_url: Optional[str] = None,
    timeout_ms: Optional[int] = None,
    http_headers: Optional[Mapping[str, str]] = None,
) -> eventstreaming.EventStream[models.ConversationEvents]:
    r"""Append new entries to an existing conversation.

    Run completion on the history of the conversation and the user entries.
    Return the new created entries.

    :param conversation_id: ID of the conversation to which we append entries.
    :param inputs:
    :param stream:
    :param store: Whether to store the results into our servers or not.
    :param handoff_execution:
    :param completion_args: White-listed arguments from the completion API
    :param retries: Override the default retry configuration for this method
    :param server_url: Override the default server URL for this method
    :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
    :param http_headers: Additional headers to set or replace on requests.
    """
    base_url = None
    url_variables = None
    # Fall back to the SDK-wide timeout when no per-call override is given.
    if timeout_ms is None:
        timeout_ms = self.sdk_configuration.timeout_ms

    # A per-call server_url overrides the configured base URL.
    if server_url is not None:
        base_url = server_url
    else:
        base_url = self._get_url(base_url, url_variables)

    # The path parameter and the JSON body are wrapped in one request model.
    request = models.AgentsAPIV1ConversationsAppendStreamRequest(
        conversation_id=conversation_id,
        conversation_append_stream_request=models.ConversationAppendStreamRequest(
            inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
            stream=stream,
            store=store,
            handoff_execution=handoff_execution,
            completion_args=utils.get_pydantic_model(
                completion_args, Optional[models.CompletionArgs]
            ),
        ),
    )

    # `#stream` disambiguates the streaming variant of the same REST path.
    req = self._build_request(
        method="POST",
        path="/v1/conversations/{conversation_id}#stream",
        base_url=base_url,
        url_variables=url_variables,
        request=request,
        request_body_required=True,
        request_has_path_params=True,
        request_has_query_params=True,
        user_agent_header="user-agent",
        accept_header_value="text/event-stream",
        http_headers=http_headers,
        security=self.sdk_configuration.security,
        # Serialization is deferred so request hooks can still mutate `request`.
        get_serialized_body=lambda: utils.serialize_request_body(
            request.conversation_append_stream_request,
            False,
            False,
            "json",
            models.ConversationAppendStreamRequest,
        ),
        timeout_ms=timeout_ms,
    )

    # Per-call retries take precedence; otherwise use the SDK-wide config.
    if retries == UNSET:
        if self.sdk_configuration.retry_config is not UNSET:
            retries = self.sdk_configuration.retry_config

    retry_config = None
    if isinstance(retries, utils.RetryConfig):
        # Only these transient statuses are retried.
        retry_config = (retries, ["429", "500", "502", "503", "504"])

    # stream=True keeps the response body open for incremental SSE reads.
    http_res = self.do_request(
        hook_ctx=HookContext(
            base_url=base_url or "",
            operation_id="agents_api_v1_conversations_append_stream",
            oauth2_scopes=[],
            security_source=get_security_from_env(
                self.sdk_configuration.security, models.Security
            ),
        ),
        request=req,
        error_status_codes=["422", "4XX", "5XX"],
        stream=True,
        retry_config=retry_config,
    )

    response_data: Any = None
    if utils.match_response(http_res, "200", "text/event-stream"):
        # Each SSE payload is unmarshalled lazily as the caller iterates.
        return eventstreaming.EventStream(
            http_res,
            lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
        )
    if utils.match_response(http_res, "422", "application/json"):
        # Error bodies must be drained from the stream before unmarshalling.
        http_res_text = utils.stream_to_text(http_res)
        response_data = utils.unmarshal_json(
            http_res_text, models.HTTPValidationErrorData
        )
        raise models.HTTPValidationError(data=response_data)
    if utils.match_response(http_res, "4XX", "*"):
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            "API error occurred", http_res.status_code, http_res_text, http_res
        )
    if utils.match_response(http_res, "5XX", "*"):
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            "API error occurred", http_res.status_code, http_res_text, http_res
        )

    # Anything else (unexpected status or content type) is a generic error.
    content_type = http_res.headers.get("Content-Type")
    http_res_text = utils.stream_to_text(http_res)
    raise models.SDKError(
        f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
        http_res.status_code,
        http_res_text,
        http_res,
    )
+ :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, 
utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append_stream", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def restart_stream( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + from_entry_id: str, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartStreamRequestHandoffExecution + ] = "server", 
+ completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartStreamRequest( + conversation_id=conversation_id, + conversation_restart_stream_request=models.ConversationRestartStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + from_entry_id=from_entry_id, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}/restart#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_stream_request, + False, + False, + "json", + models.ConversationRestartStreamRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart_stream", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None 
+ if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def restart_stream_async( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + from_entry_id: str, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. 
A new conversation is returned with the new entries returned. + + :param conversation_id: + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartStreamRequest( + conversation_id=conversation_id, + conversation_restart_stream_request=models.ConversationRestartStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + from_entry_id=from_entry_id, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_stream_request, + False, + False, + "json", + models.ConversationRestartStreamRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == 
UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart_stream", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/extra/__init__.py b/src/mistralai/extra/__init__.py index d8f7a21a..d9a81d24 100644 --- a/src/mistralai/extra/__init__.py +++ 
b/src/mistralai/extra/__init__.py @@ -1,5 +1,13 @@ -from .struct_chat import ParsedChatCompletionResponse, convert_to_parsed_chat_completion_response +from .struct_chat import ( + ParsedChatCompletionResponse, + convert_to_parsed_chat_completion_response, +) from .utils import response_format_from_pydantic_model from .utils.response_format import CustomPydanticModel -__all__ = ["convert_to_parsed_chat_completion_response", "response_format_from_pydantic_model", "CustomPydanticModel", "ParsedChatCompletionResponse"] +__all__ = [ + "convert_to_parsed_chat_completion_response", + "response_format_from_pydantic_model", + "CustomPydanticModel", + "ParsedChatCompletionResponse", +] diff --git a/src/mistralai/extra/exceptions.py b/src/mistralai/extra/exceptions.py new file mode 100644 index 00000000..7853ddc2 --- /dev/null +++ b/src/mistralai/extra/exceptions.py @@ -0,0 +1,14 @@ +class MistralClientException(Exception): + """Base exception for all the client errors.""" + + +class RunException(MistralClientException): + """Exception raised for errors during a conversation run.""" + + +class MCPException(MistralClientException): + """Exception raised for errors related to MCP operations.""" + + +class MCPAuthException(MCPException): + """Exception raised for authentication errors with an MCP server.""" diff --git a/src/mistralai/extra/mcp/__init__.py b/src/mistralai/extra/mcp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/mistralai/extra/mcp/auth.py b/src/mistralai/extra/mcp/auth.py new file mode 100644 index 00000000..909f5d4a --- /dev/null +++ b/src/mistralai/extra/mcp/auth.py @@ -0,0 +1,166 @@ +from typing import Optional + +from authlib.oauth2.rfc8414 import AuthorizationServerMetadata +from authlib.integrations.httpx_client import AsyncOAuth2Client as AsyncOAuth2ClientBase +import httpx +import logging + +from mistralai.types import BaseModel + +logger = logging.getLogger(__name__) + + +class Oauth2AuthorizationScheme(BaseModel): + 
"""Information about the oauth flow to perform with the authorization server.""" + + authorization_url: str + token_url: str + scope: list[str] + description: Optional[str] = None + refresh_url: Optional[str] = None + + +class OAuthParams(BaseModel): + """Required params for authorization.""" + + scheme: Oauth2AuthorizationScheme + client_id: str + client_secret: str + + +class AsyncOAuth2Client(AsyncOAuth2ClientBase): + """Subclass of the Async httpx oauth client which provides a constructor from OAuthParams.""" + + @classmethod + def from_oauth_params(cls, oauth_params: OAuthParams) -> "AsyncOAuth2Client": + return cls( + client_id=oauth_params.client_id, + client_secret=oauth_params.client_secret, + scope=oauth_params.scheme.scope, + ) + + +async def get_well_known_authorization_server_metadata( + server_url: str, +) -> Optional[AuthorizationServerMetadata]: + """Fetch the metadata from the well-known location. + + This should be available on MCP servers as described by the specification: + https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization#2-3-server-metadata-discovery. 
+ """ + well_known_url = f"{server_url}/.well-known/oauth-authorization-server" + response = await httpx.AsyncClient().get(well_known_url) + if 200 <= response.status_code < 300: + try: + server_metadata = AuthorizationServerMetadata(**response.json()) + server_metadata.validate() + return server_metadata + except ValueError: + logger.exception("Failed to parse oauth well-known metadata") + return None + else: + logger.error(f"Failed to get oauth well-known metadata from {server_url}") + return None + + +async def get_oauth_server_metadata(server_url: str) -> AuthorizationServerMetadata: + """Fetch the metadata from the authorization server to perform the oauth flow.""" + # 1) attempt to get the metadata from the resource server at /.well-known/oauth-protected-resource + # TODO: new self-discovery protocol, not released yet + + # 2) attempt to get the metadata from the authorization server at /.well-known/oauth-authorization-server + metadata = await get_well_known_authorization_server_metadata(server_url=server_url) + if metadata is not None: + return metadata + + # 3) fallback on default endpoints + # https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization#2-3-3-fallbacks-for-servers-without-metadata-discovery + return AuthorizationServerMetadata( + issuer=server_url, + authorization_endpoint=f"{server_url}/authorize", + token_endpoint=f"{server_url}/token", + register_endpoint=f"{server_url}/register", + response_types_supported=["code"], + response_modes_supported=["query"], + grant_types_supported=["authorization_code", "refresh_token"], + token_endpoint_auth_methods_supported=["client_secret_basic"], + code_challenge_methods_supported=["S256", "plain"], + ) + + +async def dynamic_client_registration( + register_endpoint: str, + redirect_url: str, + async_client: httpx.AsyncClient, +) -> tuple[str, str]: + """Try to register the client dynamically with an MCP server. + + Returns a client_id and client_secret. 
+ """ + # Construct the registration request payload + registration_payload = { + "client_name": "MistralSDKClient", + "grant_types": ["authorization_code", "refresh_token"], + "token_endpoint_auth_method": "client_secret_basic", + "response_types": ["code"], + "redirect_uris": [redirect_url], + } + + # Make the registration request + response = await async_client.post(register_endpoint, json=registration_payload) + try: + response.raise_for_status() + registration_info = response.json() + client_id = registration_info["client_id"] + client_secret = registration_info["client_secret"] + except Exception as e: + raise ValueError( + f"Client registration failed: status={response.status_code}, error={response.text}" + ) from e + return client_id, client_secret + + +async def build_oauth_params( + server_url: str, + redirect_url: str, + client_id: Optional[str] = None, + client_secret: Optional[str] = None, + scope: Optional[list[str]] = None, + async_client: Optional[httpx.AsyncClient] = None, +) -> OAuthParams: + """Get issuer metadata and build the oauth required params.""" + metadata = await get_oauth_server_metadata(server_url=server_url) + oauth_scheme = Oauth2AuthorizationScheme( + authorization_url=metadata.authorization_endpoint, + token_url=metadata.token_endpoint, + scope=scope or [], + refresh_url=metadata.token_endpoint + if "refresh_token" in metadata.grant_types_supported + else None, + ) + if client_id and client_secret: + return OAuthParams( + client_id=client_id, + client_secret=client_secret, + scheme=oauth_scheme, + ) + + # Try to dynamically register the client + if async_client: + reg_client_id, reg_client_secret = await dynamic_client_registration( + register_endpoint=metadata.registration_endpoint, + redirect_url=redirect_url, + async_client=async_client, + ) + else: + async with httpx.AsyncClient() as async_client: + reg_client_id, reg_client_secret = await dynamic_client_registration( + register_endpoint=metadata.registration_endpoint, + 
redirect_url=redirect_url, + async_client=async_client, + ) + return OAuthParams( + client_id=reg_client_id, + client_secret=reg_client_secret, + scheme=oauth_scheme, + ) diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py new file mode 100644 index 00000000..8be5585c --- /dev/null +++ b/src/mistralai/extra/mcp/base.py @@ -0,0 +1,155 @@ +from typing import Optional, Union +import logging +import typing +from contextlib import AsyncExitStack +from typing import Protocol, Any + +from mcp import ClientSession +from mcp.types import ListPromptsResult, EmbeddedResource, ImageContent, TextContent + +from mistralai.extra.exceptions import MCPException +from mistralai.models import ( + FunctionTool, + Function, + SystemMessageTypedDict, + AssistantMessageTypedDict, + TextChunkTypedDict, +) + +logger = logging.getLogger(__name__) + + +class MCPSystemPrompt(typing.TypedDict): + description: Optional[str] + messages: list[Union[SystemMessageTypedDict, AssistantMessageTypedDict]] + + +class MCPClientProtocol(Protocol): + """MCP client that converts MCP artifacts to Mistral format.""" + + _name: str + + async def initialize(self, exit_stack: Optional[AsyncExitStack]) -> None: + ... + + async def aclose(self) -> None: + ... + + async def get_tools(self) -> list[FunctionTool]: + ... + + async def execute_tool( + self, name: str, arguments: dict + ) -> list[TextChunkTypedDict]: + ... + + async def get_system_prompt( + self, name: str, arguments: dict[str, Any] + ) -> MCPSystemPrompt: + ... + + async def list_system_prompts(self) -> ListPromptsResult: + ... 
+ + +class MCPClientBase(MCPClientProtocol): + """Base class to implement functionalities from an initialized MCP session.""" + + _session: ClientSession + + def __init__(self, name: Optional[str] = None): + self._name = name or self.__class__.__name__ + self._exit_stack: Optional[AsyncExitStack] = None + self._is_initialized = False + + def _convert_content( + self, mcp_content: Union[TextContent, ImageContent, EmbeddedResource] + ) -> TextChunkTypedDict: + if not mcp_content.type == "text": + raise MCPException("Only supporting text tool responses for now.") + return {"type": "text", "text": mcp_content.text} + + def _convert_content_list( + self, mcp_contents: list[Union[TextContent, ImageContent, EmbeddedResource]] + ) -> list[TextChunkTypedDict]: + content_chunks = [] + for mcp_content in mcp_contents: + content_chunks.append(self._convert_content(mcp_content)) + return content_chunks + + async def get_tools(self) -> list[FunctionTool]: + mcp_tools = await self._session.list_tools() + tools = [] + for mcp_tool in mcp_tools.tools: + tools.append( + FunctionTool( + type="function", + function=Function( + name=mcp_tool.name, + description=mcp_tool.description, + parameters=mcp_tool.inputSchema, + strict=True, + ), + ) + ) + return tools + + async def execute_tool( + self, name: str, arguments: dict[str, Any] + ) -> list[TextChunkTypedDict]: + contents = await self._session.call_tool(name=name, arguments=arguments) + return self._convert_content_list(contents.content) + + async def get_system_prompt( + self, name: str, arguments: dict[str, Any] + ) -> MCPSystemPrompt: + prompt_result = await self._session.get_prompt(name=name, arguments=arguments) + return { + "description": prompt_result.description, + "messages": [ + typing.cast( + Union[SystemMessageTypedDict, AssistantMessageTypedDict], + { + "role": message.role, + "content": self._convert_content(mcp_content=message.content), + }, + ) + for message in prompt_result.messages + ], + } + + async def 
list_system_prompts(self) -> ListPromptsResult: + return await self._session.list_prompts() + + async def initialize(self, exit_stack: Optional[AsyncExitStack] = None) -> None: + """Initialize the MCP session.""" + # client is already initialized so return + if self._is_initialized: + return + if exit_stack is None: + self._exit_stack = AsyncExitStack() + exit_stack = self._exit_stack + stdio_transport = await self._get_transport(exit_stack=exit_stack) + mcp_session = await exit_stack.enter_async_context( + ClientSession( + read_stream=stdio_transport[0], + write_stream=stdio_transport[1], + ) + ) + await mcp_session.initialize() + self._session = mcp_session + self._is_initialized = True + + async def aclose(self): + """Close the MCP session.""" + if self._exit_stack: + await self._exit_stack.aclose() + + def __repr__(self): + return f"<{self.__class__.__name__} name={self._name!r} id=0x{id(self):x}>" + + def __str__(self): + return f"{self.__class__.__name__}(name={self._name})" + + async def _get_transport(self, exit_stack: AsyncExitStack): + raise NotImplementedError diff --git a/src/mistralai/extra/mcp/sse.py b/src/mistralai/extra/mcp/sse.py new file mode 100644 index 00000000..2dfe7a2d --- /dev/null +++ b/src/mistralai/extra/mcp/sse.py @@ -0,0 +1,165 @@ +import http +import logging +import typing +from typing import Any, Optional +from contextlib import AsyncExitStack +from functools import cached_property + +import httpx + +from mistralai.extra.exceptions import MCPAuthException +from mistralai.extra.mcp.base import ( + MCPClientBase, +) +from mistralai.extra.mcp.auth import OAuthParams, AsyncOAuth2Client +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream + +from mcp.client.sse import sse_client +from mcp.shared.message import SessionMessage +from authlib.oauth2.rfc6749 import OAuth2Token + +from mistralai.types import BaseModel + +logger = logging.getLogger(__name__) + + +class SSEServerParams(BaseModel): + """Parameters 
required for a MCPClient with SSE transport""" + + url: str + headers: Optional[dict[str, Any]] = None + timeout: float = 5 + sse_read_timeout: float = 60 * 5 + + +class MCPClientSSE(MCPClientBase): + """MCP client that uses sse for communication. + + The client provides authentication for OAuth2 protocol following the current MCP authorization spec: + https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization. + + This is possibly going to change in the future since the protocol has ongoing discussions. + """ + + _oauth_params: Optional[OAuthParams] + _sse_params: SSEServerParams + + def __init__( + self, + sse_params: SSEServerParams, + name: Optional[str] = None, + oauth_params: Optional[OAuthParams] = None, + auth_token: Optional[OAuth2Token] = None, + ): + super().__init__(name=name) + self._sse_params = sse_params + self._oauth_params: Optional[OAuthParams] = oauth_params + self._auth_token: Optional[OAuth2Token] = auth_token + + @cached_property + def base_url(self) -> str: + return self._sse_params.url.rstrip("/sse") + + def set_oauth_params(self, oauth_params: OAuthParams): + """Update the oauth params and client accordingly.""" + if self._oauth_params is not None: + logger.warning(f"Overriding current oauth params for {self._name}") + self._oauth_params = oauth_params + + async def get_auth_url_and_state(self, redirect_url: str) -> tuple[str, str]: + """Create the authorization url for client to start oauth flow.""" + if self._oauth_params is None: + raise MCPAuthException( + "Can't generate an authorization url without oauth_params being set, " + "make sure the oauth params have been set." 
+ ) + oauth_client = AsyncOAuth2Client.from_oauth_params(self._oauth_params) + auth_url, state = oauth_client.create_authorization_url( + self._oauth_params.scheme.authorization_url, redirect_uri=redirect_url + ) + return auth_url, state + + async def get_token_from_auth_response( + self, + authorization_response: str, + redirect_url: str, + state: str, + ) -> OAuth2Token: + """Fetch the authentication token from the server.""" + if self._oauth_params is None: + raise MCPAuthException( + "Can't fetch a token without oauth_params, make sure they have been set." + ) + oauth_client = AsyncOAuth2Client.from_oauth_params(self._oauth_params) + oauth_token = await oauth_client.fetch_token( + url=self._oauth_params.scheme.token_url, + authorization_response=authorization_response, + redirect_uri=redirect_url, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + state=state, + ) + return oauth_token + + async def refresh_auth_token(self): + """Refresh an expired token.""" + if self._oauth_params is None or self._oauth_params.scheme.refresh_url is None: + raise MCPAuthException( + "Can't refresh a token without a refresh url make sure the oauth params have been set." + ) + if self._auth_token is None: + raise MCPAuthException( + "Can't refresh a token without a refresh token, use the `set_auth_token` to add a OAuth2Token." 
+ ) + oauth_client = AsyncOAuth2Client.from_oauth_params(self._oauth_params) + oauth_token = await oauth_client.refresh_token( + url=self._oauth_params.scheme.refresh_url, + refresh_token=self._auth_token["refresh_token"], + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + self.set_auth_token(oauth_token) + + def set_auth_token(self, token: OAuth2Token) -> None: + """Register the authentication token with this client.""" + self._auth_token = token + + def _format_headers(self) -> dict[str, str]: + headers: dict[str, str] = {} + if self._sse_params.headers: + headers |= self._sse_params.headers + if self._auth_token: + headers["Authorization"] = f"Bearer {self._auth_token['access_token']}" + return headers + + async def requires_auth(self) -> bool: + """Check if the client requires authentication to communicate with the server.""" + response = httpx.get( + self._sse_params.url, + headers=self._format_headers(), + timeout=self._sse_params.timeout, + ) + return response.status_code == http.HTTPStatus.UNAUTHORIZED + + async def _get_transport( + self, exit_stack: AsyncExitStack + ) -> tuple[ + MemoryObjectReceiveStream[typing.Union[SessionMessage, Exception]], + MemoryObjectSendStream[SessionMessage], + ]: + try: + return await exit_stack.enter_async_context( + sse_client( + url=self._sse_params.url, + headers=self._format_headers(), + timeout=self._sse_params.timeout, + sse_read_timeout=self._sse_params.sse_read_timeout, + ) + ) + except Exception as e: + if isinstance(e, httpx.HTTPStatusError): + if e.response.status_code == http.HTTPStatus.UNAUTHORIZED: + if self._oauth_params is None: + raise MCPAuthException( + "Authentication required but no auth params provided." 
+ ) from e + raise MCPAuthException("Authentication required.") from e + raise diff --git a/src/mistralai/extra/mcp/stdio.py b/src/mistralai/extra/mcp/stdio.py new file mode 100644 index 00000000..28c3b8c5 --- /dev/null +++ b/src/mistralai/extra/mcp/stdio.py @@ -0,0 +1,22 @@ +from typing import Optional +import logging +from contextlib import AsyncExitStack + +from mistralai.extra.mcp.base import ( + MCPClientBase, +) + +from mcp import stdio_client, StdioServerParameters + +logger = logging.getLogger(__name__) + + +class MCPClientSTDIO(MCPClientBase): + """MCP client that uses stdio for communication.""" + + def __init__(self, stdio_params: StdioServerParameters, name: Optional[str] = None): + super().__init__(name=name) + self._stdio_params = stdio_params + + async def _get_transport(self, exit_stack: AsyncExitStack): + return await exit_stack.enter_async_context(stdio_client(self._stdio_params)) diff --git a/src/mistralai/extra/run/__init__.py b/src/mistralai/extra/run/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py new file mode 100644 index 00000000..a79fd59e --- /dev/null +++ b/src/mistralai/extra/run/context.py @@ -0,0 +1,295 @@ +import asyncio +import inspect +import typing +from contextlib import AsyncExitStack +from functools import wraps +from collections.abc import Callable + +from dataclasses import dataclass, field +from typing import Union, Optional + +import pydantic + +from mistralai.extra import ( + response_format_from_pydantic_model, +) +from mistralai.extra.exceptions import RunException +from mistralai.extra.mcp.base import MCPClientProtocol +from mistralai.extra.run.result import RunResult +from mistralai.types.basemodel import OptionalNullable, BaseModel, UNSET +from mistralai.models import ( + ResponseFormat, + FunctionCallEntry, + Tools, + ToolsTypedDict, + CompletionArgs, + CompletionArgsTypedDict, + FunctionResultEntry, + ConversationInputs, + 
ConversationInputsTypedDict, + FunctionTool, + MessageInputEntry, + InputEntries, + ResponseFormatTypedDict, +) + +from logging import getLogger + +from mistralai.extra.run.tools import ( + create_function_result, + RunFunction, + create_tool_call, + RunTool, + RunMCPTool, + RunCoroutine, +) + +if typing.TYPE_CHECKING: + from mistralai import Beta, OptionalNullable + +logger = getLogger(__name__) + + +class AgentRequestKwargs(typing.TypedDict): + agent_id: str + + +class ModelRequestKwargs(typing.TypedDict): + model: str + instructions: OptionalNullable[str] + tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] + completion_args: OptionalNullable[Union[CompletionArgs, CompletionArgsTypedDict]] + + +@dataclass +class RunContext: + """A context for running a conversation with an agent or a model. + + The context can be used to execute function calls, connect to MCP server, and keep track of information about + the run. + + Args: + conversation_id (Options[str]): The unique identifier for the conversation. This is + passed if the user wants to continue an existing conversation. + model (Options[str]): The model name to be used for the conversation. Can't be used along with 'agent_id'. + agent_id (Options[str]): The agent id to be used for the conversation. Can't be used along with 'model'. + output_format (Optional[type[BaseModel]]): The output format expected from the conversation. It represents + the `response_format` which is part of the `CompletionArgs`. + request_count (int): The number of requests made in the current `RunContext`. + continue_on_fn_error (bool): Flag to determine if the conversation should continue when function execution + resulted in an error. 
+ """ + + _exit_stack: AsyncExitStack = field(init=False) + _callable_tools: dict[str, RunTool] = field(init=False, default_factory=dict) + _mcp_clients: list[MCPClientProtocol] = field(init=False, default_factory=list) + + conversation_id: Optional[str] = field(default=None) + model: Optional[str] = field(default=None) + agent_id: Optional[str] = field(default=None) + output_format: Optional[type[BaseModel]] = field(default=None) + request_count: int = field(default=0) + continue_on_fn_error: bool = field(default=False) + + def __post_init__(self): + if self.model and self.agent_id: + raise RunException("Only one for model or agent_id should be set") + self._exit_stack = AsyncExitStack() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self._exit_stack.aclose() + for mcp_client in self._mcp_clients: + await mcp_client.aclose() + + def register_func(self, func: Callable): + """Add a function to the context.""" + if not inspect.isfunction(func): + raise RunException( + "Only object of type function can be registered at the moment." 
+ ) + + if inspect.iscoroutinefunction(func): + self._callable_tools[func.__name__] = RunCoroutine( + name=func.__name__, + awaitable=func, + tool=create_tool_call(func), + ) + else: + self._callable_tools[func.__name__] = RunFunction( + name=func.__name__, + callable=func, + tool=create_tool_call(func), + ) + + @wraps(func) + def wrapper(*args, **kwargs): + logger.info(f"Executing {func.__name__}") + return func(*args, **kwargs) + + return wrapper + + async def register_mcp_clients(self, mcp_clients: list[MCPClientProtocol]) -> None: + """Registering multiple MCP clients at the same time in the same asyncio.Task.""" + for mcp_client in mcp_clients: + await self.register_mcp_client(mcp_client) + + async def register_mcp_client(self, mcp_client: MCPClientProtocol) -> None: + """Add a MCP client to the context.""" + await mcp_client.initialize(exit_stack=self._exit_stack) + tools = await mcp_client.get_tools() + for tool in tools: + logger.info( + f"Adding tool {tool.function.name} from {mcp_client._name or 'mcp client'}" + ) + self._callable_tools[tool.function.name] = RunMCPTool( + name=tool.function.name, + tool=tool, + mcp_client=mcp_client, + ) + self._mcp_clients.append(mcp_client) + + async def execute_function_calls( + self, function_calls: list[FunctionCallEntry] + ) -> list[FunctionResultEntry]: + """Execute function calls and create function results from them.""" + if not all( + function_call.name in self._callable_tools + for function_call in function_calls + ): + logger.warning("Can't execute all functions, stopping run here") + return [] + function_result_tasks = [] + for function_call in function_calls: + function_result_tasks.append( + asyncio.create_task( + create_function_result( + function_call=function_call, + run_tool=self._callable_tools[function_call.name], + continue_on_fn_error=self.continue_on_fn_error, + ) + ) + ) + await asyncio.gather(*function_result_tasks) + return [task.result() for task in function_result_tasks] + + def 
get_tools(self) -> list[FunctionTool]: + """Get the tools that are part of the context.""" + callable_tools = [ + run_functions.tool for run_functions in self._callable_tools.values() + ] + return callable_tools + + async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs: + """Prepare an agent request with the functions added to the context. + + Update the agent definition before making the request. + """ + if self.agent_id is None: + raise RunException( + "Can't prepare an agent request, if no agent_id is provided" + ) + agent = await beta_client.agents.get_async(agent_id=self.agent_id) + agent_tools = agent.tools or [] + updated_tools = [] + for i in range(len(agent_tools)): + tool = agent_tools[i] + if tool.type != "function": + updated_tools.append(tool) + elif tool.function.name in self._callable_tools: + # function already exists in the agent, don't add it again + continue + else: + updated_tools.append(tool) + updated_tools += self.get_tools() + completion_args = ( + CompletionArgs(response_format=self.response_format) + if self.output_format + else None + ) + beta_client.agents.update( + agent_id=self.agent_id, tools=updated_tools, completion_args=completion_args + ) + return AgentRequestKwargs(agent_id=self.agent_id) + + async def prepare_model_request( + self, + tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] = UNSET, + completion_args: OptionalNullable[ + Union[CompletionArgs, CompletionArgsTypedDict] + ] = UNSET, + instructions: OptionalNullable[str] = None, + ) -> ModelRequestKwargs: + if self.model is None: + raise RunException("Can't prepare a model request, if no model is provided") + if not completion_args and self.output_format: + completion_args = CompletionArgs(response_format=self.response_format) + elif isinstance(completion_args, CompletionArgs) and self.output_format: + completion_args.response_format = self.response_format + elif isinstance(completion_args, dict) and self.output_format: + 
completion_args["response_format"] = typing.cast( + ResponseFormatTypedDict, self.response_format.model_dump() + ) + request_tools = [] + if isinstance(tools, list): + for tool in tools: + request_tools.append(typing.cast(Tools, tool)) + for tool in self.get_tools(): + request_tools.append(tool) + return ModelRequestKwargs( + model=self.model, + tools=request_tools, + instructions=instructions, + completion_args=completion_args, + ) + + @property + def response_format(self) -> ResponseFormat: + if not self.output_format: + raise RunException("No response format exist for the current RunContext.") + return response_format_from_pydantic_model(self.output_format) + + +async def _validate_run( + *, + beta_client: "Beta", + run_ctx: RunContext, + inputs: Union[ConversationInputs, ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] = UNSET, + completion_args: OptionalNullable[ + Union[CompletionArgs, CompletionArgsTypedDict] + ] = UNSET, +) -> tuple[ + Union[AgentRequestKwargs, ModelRequestKwargs], RunResult, list[InputEntries] +]: + input_entries: list[InputEntries] = [] + if isinstance(inputs, str): + input_entries.append(MessageInputEntry(role="user", content=inputs)) + else: + for input in inputs: + if isinstance(input, dict): + input_entries.append( + pydantic.TypeAdapter(InputEntries).validate_python(input) + ) + run_result = RunResult( + input_entries=input_entries, + output_model=run_ctx.output_format, + conversation_id=run_ctx.conversation_id, + ) + req: Union[AgentRequestKwargs, ModelRequestKwargs] + if run_ctx.agent_id: + if tools or completion_args: + raise RunException("Can't set tools or completion_args when using an agent") + req = await run_ctx.prepare_agent_request(beta_client=beta_client) + elif run_ctx.model: + req = await run_ctx.prepare_model_request( + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + else: + raise 
RunException("Either agent_id or model must be set in the run context") + return req, run_result, input_entries diff --git a/src/mistralai/extra/run/result.py b/src/mistralai/extra/run/result.py new file mode 100644 index 00000000..9592dccf --- /dev/null +++ b/src/mistralai/extra/run/result.py @@ -0,0 +1,212 @@ +import datetime +import json +import typing +from typing import Union, Annotated, Optional, Literal +from dataclasses import dataclass, field +from pydantic import Discriminator, Tag, BaseModel + +from mistralai.extra.utils.response_format import pydantic_model_from_json +from mistralai.models import ( + FunctionResultEntry, + FunctionCallEntry, + MessageOutputEntry, + AgentHandoffEntry, + ToolExecutionEntry, + MessageInputEntry, + AgentHandoffDoneEvent, + AgentHandoffStartedEvent, + ResponseDoneEvent, + ResponseErrorEvent, + ResponseStartedEvent, + FunctionCallEvent, + MessageOutputEvent, + ToolExecutionDoneEvent, + ToolExecutionStartedEvent, + ConversationEventsData, + MessageOutputEventContent, + MessageOutputEntryContent, + TextChunk, + MessageOutputContentChunks, + SSETypes, + InputEntries, + ToolFileChunk, + ToolReferenceChunk, + FunctionCallEntryArguments, +) +from mistralai.utils import get_discriminator + +RunOutputEntries = typing.Union[ + MessageOutputEntry, + FunctionCallEntry, + FunctionResultEntry, + AgentHandoffEntry, + ToolExecutionEntry, +] + +RunEntries = typing.Union[RunOutputEntries, MessageInputEntry] + + +def as_text(entry: RunOutputEntries) -> str: + """Keep only the messages and turn content into textual representation.""" + text = "" + if isinstance(entry, MessageOutputEntry): + if isinstance(entry.content, str): + text += entry.content + else: + for chunk in entry.content: + if isinstance(chunk, TextChunk): + text += chunk.text + elif isinstance(chunk, ToolFileChunk): + text += f"" + elif isinstance(chunk, ToolReferenceChunk): + text += f"" + return text + + +def reconstitute_message_content( + chunks: 
list[MessageOutputEventContent], +) -> MessageOutputEntryContent: + """Given a list of MessageOutputEventContent, recreate a normalised MessageOutputEntryContent.""" + if all(isinstance(chunk, str) for chunk in chunks): + return "".join(typing.cast(list[str], chunks)) + content: list[MessageOutputContentChunks] = [] + for chunk in chunks: + if isinstance(chunk, str): + chunk = TextChunk(text=chunk) + if isinstance(chunk, TextChunk): + if len(content) and isinstance(content[-1], TextChunk): + content[-1].text += chunk.text + else: + content.append(chunk) + else: + content.append(chunk) + return content + + +def reconstitute_function_call_args(chunks: list[str]) -> FunctionCallEntryArguments: + """Recreates function call arguments from stream""" + return typing.cast(FunctionCallEntryArguments, "".join(chunks)) + + +def reconstitue_entries( + received_event_tracker: dict[int, list[ConversationEventsData]], +) -> list[RunOutputEntries]: + """Given a list of events, recreate the corresponding entries.""" + run_entries: list[RunOutputEntries] = [] + for idx, events in sorted(received_event_tracker.items(), key=lambda x: x[0]): + first_event = events[0] + if isinstance(first_event, MessageOutputEvent): + message_events = typing.cast(list[MessageOutputEvent], events) + run_entries.append( + MessageOutputEntry( + content=reconstitute_message_content( + chunks=[ + message_event.content for message_event in message_events + ] + ), + created_at=first_event.created_at, + id=first_event.id, + agent_id=first_event.agent_id, + model=first_event.model, + role=first_event.role, + ) + ) + elif isinstance(first_event, FunctionCallEvent): + function_call_events = typing.cast(list[FunctionCallEvent], events) + run_entries.append( + FunctionCallEntry( + name=first_event.name, + arguments=reconstitute_function_call_args( + chunks=[ + function_call_event.arguments + for function_call_event in function_call_events + ] + ), + created_at=first_event.created_at, + id=first_event.id, + 
tool_call_id=first_event.tool_call_id, + ) + ) + return run_entries + + +@dataclass +class RunFiles: + id: str + name: str + content: bytes + + +@dataclass +class RunResult: + input_entries: list[InputEntries] + conversation_id: Optional[str] = field(default=None) + output_entries: list[RunOutputEntries] = field(default_factory=list) + files: dict[str, RunFiles] = field(default_factory=dict) + output_model: Optional[type[BaseModel]] = field(default=None) + + def get_file(self, file_id: str) -> Optional[RunFiles]: + return self.files.get(file_id) + + @property + def entries(self) -> list[RunEntries]: + return [*self.input_entries, *self.output_entries] + + @property + def output_as_text(self) -> str: + if not self.output_entries: + raise ValueError("No output entries were started.") + return "\n".join( + as_text(entry) + for entry in self.output_entries + if entry.type == "message.output" + ) + + @property + def output_as_model(self) -> BaseModel: + if self.output_model is None: + raise ValueError("No output format was not set.") + return pydantic_model_from_json( + json.loads(self.output_as_text), self.output_model + ) + + +class FunctionResultEvent(BaseModel): + id: Optional[str] = None + + type: Optional[Literal["function.result"]] = "function.result" + + result: str + + tool_call_id: str + + created_at: Optional[datetime.datetime] = datetime.datetime.now( + tz=datetime.timezone.utc + ) + + output_index: Optional[int] = 0 + + +RunResultEventsType = typing.Union[SSETypes, Literal["function.result"]] + +RunResultEventsData = typing.Annotated[ + Union[ + Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], + Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], + Annotated[ResponseDoneEvent, Tag("conversation.response.done")], + Annotated[ResponseErrorEvent, Tag("conversation.response.error")], + Annotated[ResponseStartedEvent, Tag("conversation.response.started")], + Annotated[FunctionCallEvent, Tag("function.call.delta")], + 
Annotated[MessageOutputEvent, Tag("message.output.delta")], + Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], + Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], + Annotated[FunctionResultEvent, Tag("function.result")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class RunResultEvents(BaseModel): + event: RunResultEventsType + + data: RunResultEventsData diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py new file mode 100644 index 00000000..81fec665 --- /dev/null +++ b/src/mistralai/extra/run/tools.py @@ -0,0 +1,225 @@ +import itertools +import logging +from dataclasses import dataclass +import inspect + +from pydantic import Field, create_model +from pydantic.fields import FieldInfo +import json +from typing import cast, Callable, Sequence, Any, ForwardRef, get_type_hints, Union + +from griffe import ( + Docstring, + DocstringSectionKind, + DocstringSectionText, + DocstringParameter, + DocstringSection, +) + +from mistralai.extra.exceptions import RunException +from mistralai.extra.mcp.base import MCPClientProtocol +from mistralai.extra.run.result import RunOutputEntries +from mistralai.models import ( + FunctionResultEntry, + FunctionTool, + Function, + FunctionCallEntry, +) + + +logger = logging.getLogger(__name__) + + +@dataclass +class RunFunction: + name: str + callable: Callable + tool: FunctionTool + + +@dataclass +class RunCoroutine: + name: str + awaitable: Callable + tool: FunctionTool + + +@dataclass +class RunMCPTool: + name: str + tool: FunctionTool + mcp_client: MCPClientProtocol + + +RunTool = Union[RunFunction, RunCoroutine, RunMCPTool] + + +def _get_function_description(docstring_sections: list[DocstringSection]) -> str: + """Given a list of docstring sections create a description for the function.""" + text_sections: list[DocstringSectionText] = [] + for section in docstring_sections: + if section.kind == DocstringSectionKind.text: + 
text_sections.append(cast(DocstringSectionText, section)) + return "\n".join(text.value for text in text_sections) + + +def _get_function_parameters( + docstring_sections: list[DocstringSection], + params_from_sig: list[inspect.Parameter], + type_hints: dict[str, Any], +): + """Given a list of docstring sections and type annotations create the most accurate tool parameters""" + params_from_docstrings: list[DocstringParameter] = list( + itertools.chain.from_iterable( + section.value + for section in docstring_sections + if section.kind + in (DocstringSectionKind.parameters, DocstringSectionKind.other_parameters) + ) + ) + + # Extract all description and annotation + param_descriptions = {} + param_annotations = {} + + for param_doc in params_from_docstrings: + param_descriptions[param_doc.name] = param_doc.description + + for param in params_from_sig: + if param.name not in param_descriptions: + param_descriptions[param.name] = "" + param_annotations[param.name] = type_hints.get(param.name) + + # resolve all params into Field and create the parameters schema + fields: dict[str, tuple[type, FieldInfo]] = {} + for p in params_from_sig: + default = p.default if p.default is not inspect.Parameter.empty else ... + annotation = ( + p.annotation if p.annotation is not inspect.Parameter.empty else Any + ) + # handle forward ref with the help of get_type_hints + if isinstance(annotation, str): + annotation = type_hints[p.name] + + if isinstance(default, FieldInfo): + field_info = default + else: + # If the annotation is Annotated[..., Field(...)] extract the Field and annotation + # Otherwise, just use the annotation as-is + field_info = None + # If it's Annotated[..., SomeFieldMarker(...)], find it + if hasattr(annotation, "__metadata__") and hasattr(annotation, "__args__"): + # It's Annotated + # e.g. Annotated[str, Field(...)] + # Extract the first Field(...) 
or None if not found + for meta in annotation.__metadata__: # type: ignore + if isinstance(meta, FieldInfo): + field_info = meta + break + # The actual annotation is the first part of Annotated + annotation = annotation.__args__[0] # type: ignore + + # handle forward ref with the help of get_type_hints + if isinstance(annotation, ForwardRef): + annotation = param_annotations[p.name] + + # no Field + if field_info is None: + if default is ...: + field_info = Field() + else: + field_info = Field(default=default) + + field_info.description = param_descriptions[p.name] + fields[p.name] = (cast(type, annotation), field_info) + + schema = create_model("_", **fields).model_json_schema() # type: ignore[call-overload] + schema.pop("title", None) + for prop in schema.get("properties", {}).values(): + prop.pop("title", None) + return schema + + +def create_tool_call(func: Callable) -> FunctionTool: + """Parse a function docstring / type annotations to create a FunctionTool.""" + name = func.__name__ + + # Inspect and parse the docstring of the function + doc = inspect.getdoc(func) + docstring_sections: list[DocstringSection] + if not doc: + logger.warning( + f"Function '{name}' without a docstring is being parsed, add docstring for more accurate result." + ) + docstring_sections = [] + else: + docstring = Docstring(doc, parser="google") + docstring_sections = docstring.parse(warnings=False) + if len(docstring_sections) == 0: + logger.warning( + f"Function '{name}' has no relevant docstring sections, add docstring for more accurate result." 
+ ) + + # Extract the function's signature and type hints + sig = inspect.signature(func) + params_from_sig = list(sig.parameters.values()) + type_hints = get_type_hints(func, include_extras=True, localns=None, globalns=None) + + return FunctionTool( + type="function", + function=Function( + name=name, + description=_get_function_description(docstring_sections), + parameters=_get_function_parameters( + docstring_sections=docstring_sections, + params_from_sig=params_from_sig, + type_hints=type_hints, + ), + strict=True, + ), + ) + + +async def create_function_result( + function_call: FunctionCallEntry, + run_tool: RunTool, + continue_on_fn_error: bool = False, +) -> FunctionResultEntry: + """Run the function with arguments of a FunctionCallEntry.""" + arguments = ( + json.loads(function_call.arguments) + if isinstance(function_call.arguments, str) + else function_call.arguments + ) + try: + if isinstance(run_tool, RunFunction): + res = run_tool.callable(**arguments) + elif isinstance(run_tool, RunCoroutine): + res = await run_tool.awaitable(**arguments) + elif isinstance(run_tool, RunMCPTool): + res = await run_tool.mcp_client.execute_tool(function_call.name, arguments) + except Exception as e: + if continue_on_fn_error is True: + return FunctionResultEntry( + tool_call_id=function_call.tool_call_id, + result=f"Error while executing {function_call.name}: {str(e)}", + ) + raise RunException( + f"Failed to execute tool {function_call.name} with arguments '{function_call.arguments}'" + ) from e + + return FunctionResultEntry( + tool_call_id=function_call.tool_call_id, + result=res if isinstance(res, str) else json.dumps(res), + ) + + +def get_function_calls( + output_entries: Sequence[RunOutputEntries], +) -> list[FunctionCallEntry]: + """Extract all FunctionCallEntry from a conversation response""" + function_calls = [] + for entry in output_entries: + if isinstance(entry, FunctionCallEntry): + function_calls.append(entry) + return function_calls diff --git 
a/src/mistralai/extra/run/utils.py b/src/mistralai/extra/run/utils.py new file mode 100644 index 00000000..231c7131 --- /dev/null +++ b/src/mistralai/extra/run/utils.py @@ -0,0 +1,36 @@ +import importlib.util +import sys +from typing import Callable, TypeVar, Any, cast +from functools import wraps + +from mistralai.extra.exceptions import MistralClientException + +F = TypeVar("F", bound=Callable[..., Any]) + + +REQUIRED_PYTHON_VERSION = (3, 10) +REQUIRED_PYTHON_VERSION_STR = "3.10" +REQUIRED_PACKAGES = ["mcp"] + + +def is_module_installed(module_name: str) -> bool: + spec = importlib.util.find_spec(module_name) + return spec is not None + + +def run_requirements(func: F) -> F: + @wraps(func) + def wrapper(*args, **kwargs): + if sys.version_info < REQUIRED_PYTHON_VERSION: + raise MistralClientException( + f"{func.__name__} requires a Python version higher than {REQUIRED_PYTHON_VERSION_STR}." + f"You are using Python {sys.version_info.major}.{sys.version_info.minor}." + ) + for package in REQUIRED_PACKAGES: + if not is_module_installed(package): + raise MistralClientException( + f"{func.__name__} requires the sdk to be installed with 'agents' extra dependencies." + ) + return func(*args, **kwargs) + + return cast(F, wrapper) diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py new file mode 100644 index 00000000..5fdd8f32 --- /dev/null +++ b/src/mistralai/mistral_agents.py @@ -0,0 +1,1158 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, List, Mapping, Optional, Union + + +class MistralAgents(BaseSDK): + def create( + self, + *, + model: str, + name: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.AgentCreationRequestTools], + List[models.AgentCreationRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Create a agent that can be used within a conversation. + + Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + + :param model: + :param name: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param description: + :param handoffs: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentCreationRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentCreationRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + ) + + req = self._build_request( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentCreationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + 
http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def create_async( + self, + *, + model: str, + name: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.AgentCreationRequestTools], + List[models.AgentCreationRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Create a agent that can be used within a conversation. + + Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + + :param model: + :param name: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. 
+ :param completion_args: White-listed arguments from the completion API + :param description: + :param handoffs: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentCreationRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentCreationRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentCreationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=[], + 
security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. 
+ + :param page: + :param page_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + ) + + req = self._build_request( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, List[models.Agent]) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, 
models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. + + :param page: + :param page_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, List[models.Agent]) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent retrieve an agent entity with its attributes. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + 
raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent retrieve an agent entity with its attributes. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def update( + self, + *, + agent_id: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.AgentUpdateRequestTools], + List[models.AgentUpdateRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + model: OptionalNullable[str] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent entity. + + Update an agent attributes and create a new version. + + :param agent_id: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param model: + :param name: + :param description: + :param handoffs: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + agent_update_request=models.AgentUpdateRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentUpdateRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.agent_update_request, + False, + False, + "json", + models.AgentUpdateRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, 
models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def update_async( + self, + *, + agent_id: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.AgentUpdateRequestTools], + List[models.AgentUpdateRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + model: OptionalNullable[str] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent entity. + + Update an agent attributes and create a new version. + + :param agent_id: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. 
+ :param completion_args: White-listed arguments from the completion API + :param model: + :param name: + :param description: + :param handoffs: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + agent_update_request=models.AgentUpdateRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentUpdateRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.agent_update_request, + False, + False, + "json", + models.AgentUpdateRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = 
await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def update_version( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. 
+ + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def update_version_async( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index d56f5bf8..cf121986 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -1,5 +1,85 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from .agent import Agent, AgentObject, AgentTools, AgentToolsTypedDict, AgentTypedDict +from .agentconversation import ( + AgentConversation, + AgentConversationObject, + AgentConversationTypedDict, +) +from .agentcreationrequest import ( + AgentCreationRequest, + AgentCreationRequestTools, + AgentCreationRequestToolsTypedDict, + AgentCreationRequestTypedDict, +) +from .agenthandoffdoneevent import ( + AgentHandoffDoneEvent, + AgentHandoffDoneEventType, + AgentHandoffDoneEventTypedDict, +) +from .agenthandoffentry import ( + AgentHandoffEntry, + AgentHandoffEntryObject, + AgentHandoffEntryType, + AgentHandoffEntryTypedDict, +) +from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventType, + AgentHandoffStartedEventTypedDict, +) +from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetRequest, + AgentsAPIV1AgentsGetRequestTypedDict, +) +from .agents_api_v1_agents_listop import ( + AgentsAPIV1AgentsListRequest, + AgentsAPIV1AgentsListRequestTypedDict, +) +from .agents_api_v1_agents_update_versionop import ( + AgentsAPIV1AgentsUpdateVersionRequest, + AgentsAPIV1AgentsUpdateVersionRequestTypedDict, +) +from .agents_api_v1_agents_updateop import ( + 
AgentsAPIV1AgentsUpdateRequest, + AgentsAPIV1AgentsUpdateRequestTypedDict, +) +from .agents_api_v1_conversations_append_streamop import ( + AgentsAPIV1ConversationsAppendStreamRequest, + AgentsAPIV1ConversationsAppendStreamRequestTypedDict, +) +from .agents_api_v1_conversations_appendop import ( + AgentsAPIV1ConversationsAppendRequest, + AgentsAPIV1ConversationsAppendRequestTypedDict, +) +from .agents_api_v1_conversations_getop import ( + AgentsAPIV1ConversationsGetRequest, + AgentsAPIV1ConversationsGetRequestTypedDict, + AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, +) +from .agents_api_v1_conversations_historyop import ( + AgentsAPIV1ConversationsHistoryRequest, + AgentsAPIV1ConversationsHistoryRequestTypedDict, +) +from .agents_api_v1_conversations_listop import ( + AgentsAPIV1ConversationsListRequest, + AgentsAPIV1ConversationsListRequestTypedDict, + ResponseBody, + ResponseBodyTypedDict, +) +from .agents_api_v1_conversations_messagesop import ( + AgentsAPIV1ConversationsMessagesRequest, + AgentsAPIV1ConversationsMessagesRequestTypedDict, +) +from .agents_api_v1_conversations_restart_streamop import ( + AgentsAPIV1ConversationsRestartStreamRequest, + AgentsAPIV1ConversationsRestartStreamRequestTypedDict, +) +from .agents_api_v1_conversations_restartop import ( + AgentsAPIV1ConversationsRestartRequest, + AgentsAPIV1ConversationsRestartRequestTypedDict, +) from .agentscompletionrequest import ( AgentsCompletionRequest, AgentsCompletionRequestMessages, @@ -20,6 +100,12 @@ AgentsCompletionStreamRequestToolChoiceTypedDict, AgentsCompletionStreamRequestTypedDict, ) +from .agentupdaterequest import ( + AgentUpdateRequest, + AgentUpdateRequestTools, + AgentUpdateRequestToolsTypedDict, + AgentUpdateRequestTypedDict, +) from .apiendpoint import APIEndpoint from .archiveftmodelout import ( ArchiveFTModelOut, @@ -39,6 +125,7 @@ from .batchjobout import BatchJobOut, BatchJobOutObject, 
BatchJobOutTypedDict from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict from .batchjobstatus import BatchJobStatus +from .builtinconnectors import BuiltInConnectors from .chatclassificationrequest import ( ChatClassificationRequest, ChatClassificationRequestTypedDict, @@ -131,6 +218,13 @@ ClassifierTrainingParametersIn, ClassifierTrainingParametersInTypedDict, ) +from .codeinterpretertool import ( + CodeInterpreterTool, + CodeInterpreterToolType, + CodeInterpreterToolTypedDict, +) +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completiondetailedjobout import ( CompletionDetailedJobOut, @@ -152,11 +246,11 @@ ) from .completionjobout import ( CompletionJobOut, + CompletionJobOutObject, CompletionJobOutTypedDict, Integrations, IntegrationsTypedDict, JobType, - Object, Repositories, RepositoriesTypedDict, Status, @@ -175,6 +269,67 @@ CompletionTrainingParametersInTypedDict, ) from .contentchunk import ContentChunk, ContentChunkTypedDict +from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestHandoffExecution, + ConversationAppendRequestTypedDict, +) +from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestHandoffExecution, + ConversationAppendStreamRequestTypedDict, +) +from .conversationevents import ( + ConversationEvents, + ConversationEventsData, + ConversationEventsDataTypedDict, + ConversationEventsTypedDict, +) +from .conversationhistory import ( + ConversationHistory, + ConversationHistoryObject, + ConversationHistoryTypedDict, + Entries, + EntriesTypedDict, +) +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .conversationmessages import ( + ConversationMessages, + ConversationMessagesObject, + 
ConversationMessagesTypedDict, +) +from .conversationrequest import ( + ConversationRequest, + ConversationRequestTypedDict, + HandoffExecution, + Tools, + ToolsTypedDict, +) +from .conversationresponse import ( + ConversationResponse, + ConversationResponseObject, + ConversationResponseTypedDict, + Outputs, + OutputsTypedDict, +) +from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestHandoffExecution, + ConversationRestartRequestTypedDict, +) +from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestHandoffExecution, + ConversationRestartStreamRequestTypedDict, +) +from .conversationstreamrequest import ( + ConversationStreamRequest, + ConversationStreamRequestHandoffExecution, + ConversationStreamRequestTools, + ConversationStreamRequestToolsTypedDict, + ConversationStreamRequestTypedDict, +) +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict from .delete_model_v1_models_model_id_deleteop import ( DeleteModelV1ModelsModelIDDeleteRequest, DeleteModelV1ModelsModelIDDeleteRequestTypedDict, @@ -182,6 +337,11 @@ from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict +from .documentlibrarytool import ( + DocumentLibraryTool, + DocumentLibraryToolType, + DocumentLibraryToolTypedDict, +) from .documenturlchunk import ( DocumentURLChunk, DocumentURLChunkType, @@ -252,7 +412,29 @@ FunctionCall, FunctionCallTypedDict, ) +from .functioncallentry import ( + FunctionCallEntry, + FunctionCallEntryObject, + FunctionCallEntryType, + FunctionCallEntryTypedDict, +) +from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, +) +from .functioncallevent import ( + FunctionCallEvent, + FunctionCallEventType, + 
FunctionCallEventTypedDict, +) from .functionname import FunctionName, FunctionNameTypedDict +from .functionresultentry import ( + FunctionResultEntry, + FunctionResultEntryObject, + FunctionResultEntryType, + FunctionResultEntryTypedDict, +) +from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict from .githubrepositoryin import ( GithubRepositoryIn, GithubRepositoryInType, @@ -264,6 +446,11 @@ GithubRepositoryOutTypedDict, ) from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .imagegenerationtool import ( + ImageGenerationTool, + ImageGenerationToolType, + ImageGenerationToolTypedDict, +) from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( ImageURLChunk, @@ -272,6 +459,7 @@ ImageURLChunkType, ImageURLChunkTypedDict, ) +from .inputentries import InputEntries, InputEntriesTypedDict from .inputs import ( Inputs, InputsTypedDict, @@ -366,8 +554,50 @@ LegacyJobMetadataOutTypedDict, ) from .listfilesout import ListFilesOut, ListFilesOutTypedDict +from .messageentries import MessageEntries, MessageEntriesTypedDict +from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, +) +from .messageinputentry import ( + MessageInputEntry, + MessageInputEntryContent, + MessageInputEntryContentTypedDict, + MessageInputEntryRole, + MessageInputEntryType, + MessageInputEntryTypedDict, + Object, +) +from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, +) +from .messageoutputentry import ( + MessageOutputEntry, + MessageOutputEntryContent, + MessageOutputEntryContentTypedDict, + MessageOutputEntryObject, + MessageOutputEntryRole, + MessageOutputEntryType, + MessageOutputEntryTypedDict, +) +from .messageoutputevent import ( + MessageOutputEvent, + MessageOutputEventContent, + MessageOutputEventContentTypedDict, + MessageOutputEventRole, + MessageOutputEventType, + MessageOutputEventTypedDict, +) 
from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from .modelconversation import ( + ModelConversation, + ModelConversationObject, + ModelConversationTools, + ModelConversationToolsTypedDict, + ModelConversationTypedDict, +) from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict from .moderationobject import ModerationObject, ModerationObjectTypedDict from .moderationresponse import ModerationResponse, ModerationResponseTypedDict @@ -377,10 +607,26 @@ from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict from .ocrresponse import OCRResponse, OCRResponseTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict +from .responsedoneevent import ( + ResponseDoneEvent, + ResponseDoneEventType, + ResponseDoneEventTypedDict, +) +from .responseerrorevent import ( + ResponseErrorEvent, + ResponseErrorEventType, + ResponseErrorEventTypedDict, +) from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats +from .responsestartedevent import ( + ResponseStartedEvent, + ResponseStartedEventType, + ResponseStartedEventTypedDict, +) from .retrieve_model_v1_models_model_id_getop import ( RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict, @@ -392,6 +638,7 @@ from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .source import Source +from .ssetypes import SSETypes from .systemmessage import ( Role, SystemMessage, @@ -404,6 +651,23 @@ from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum +from 
.toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventType, + ToolExecutionDoneEventTypedDict, +) +from .toolexecutionentry import ( + ToolExecutionEntry, + ToolExecutionEntryObject, + ToolExecutionEntryType, + ToolExecutionEntryTypedDict, +) +from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventType, + ToolExecutionStartedEventTypedDict, +) +from .toolfilechunk import ToolFileChunk, ToolFileChunkType, ToolFileChunkTypedDict from .toolmessage import ( ToolMessage, ToolMessageContent, @@ -411,6 +675,11 @@ ToolMessageRole, ToolMessageTypedDict, ) +from .toolreferencechunk import ( + ToolReferenceChunk, + ToolReferenceChunkType, + ToolReferenceChunkTypedDict, +) from .tooltypes import ToolTypes from .trainingfile import TrainingFile, TrainingFileTypedDict from .unarchiveftmodelout import ( @@ -444,10 +713,68 @@ WandbIntegrationOutType, WandbIntegrationOutTypedDict, ) +from .websearchpremiumtool import ( + WebSearchPremiumTool, + WebSearchPremiumToolType, + WebSearchPremiumToolTypedDict, +) +from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict __all__ = [ "APIEndpoint", + "Agent", + "AgentConversation", + "AgentConversationObject", + "AgentConversationTypedDict", + "AgentCreationRequest", + "AgentCreationRequestTools", + "AgentCreationRequestToolsTypedDict", + "AgentCreationRequestTypedDict", + "AgentHandoffDoneEvent", + "AgentHandoffDoneEventType", + "AgentHandoffDoneEventTypedDict", + "AgentHandoffEntry", + "AgentHandoffEntryObject", + "AgentHandoffEntryType", + "AgentHandoffEntryTypedDict", + "AgentHandoffStartedEvent", + "AgentHandoffStartedEventType", + "AgentHandoffStartedEventTypedDict", + "AgentObject", + "AgentTools", + "AgentToolsTypedDict", + "AgentTypedDict", + "AgentUpdateRequest", + "AgentUpdateRequestTools", + "AgentUpdateRequestToolsTypedDict", + "AgentUpdateRequestTypedDict", + "AgentsAPIV1AgentsGetRequest", + "AgentsAPIV1AgentsGetRequestTypedDict", 
+ "AgentsAPIV1AgentsListRequest", + "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsUpdateRequest", + "AgentsAPIV1AgentsUpdateRequestTypedDict", + "AgentsAPIV1AgentsUpdateVersionRequest", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", + "AgentsAPIV1ConversationsAppendRequest", + "AgentsAPIV1ConversationsAppendRequestTypedDict", + "AgentsAPIV1ConversationsAppendStreamRequest", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsGetRequest", + "AgentsAPIV1ConversationsGetRequestTypedDict", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", + "AgentsAPIV1ConversationsHistoryRequest", + "AgentsAPIV1ConversationsHistoryRequestTypedDict", + "AgentsAPIV1ConversationsListRequest", + "AgentsAPIV1ConversationsListRequestTypedDict", + "AgentsAPIV1ConversationsMessagesRequest", + "AgentsAPIV1ConversationsMessagesRequestTypedDict", + "AgentsAPIV1ConversationsRestartRequest", + "AgentsAPIV1ConversationsRestartRequestTypedDict", + "AgentsAPIV1ConversationsRestartStreamRequest", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", "AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", @@ -487,6 +814,7 @@ "BatchJobsOut", "BatchJobsOutObject", "BatchJobsOutTypedDict", + "BuiltInConnectors", "ChatClassificationRequest", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", @@ -545,6 +873,13 @@ "ClassifierTrainingParametersIn", "ClassifierTrainingParametersInTypedDict", "ClassifierTrainingParametersTypedDict", + "CodeInterpreterTool", + "CodeInterpreterToolType", + "CodeInterpreterToolTypedDict", + "CompletionArgs", + "CompletionArgsStop", + "CompletionArgsStopTypedDict", + "CompletionArgsTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionDetailedJobOut", @@ -562,6 +897,7 @@ "CompletionFTModelOutObject", "CompletionFTModelOutTypedDict", "CompletionJobOut", + 
"CompletionJobOutObject", "CompletionJobOutTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", @@ -574,6 +910,42 @@ "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", + "ConversationAppendRequest", + "ConversationAppendRequestHandoffExecution", + "ConversationAppendRequestTypedDict", + "ConversationAppendStreamRequest", + "ConversationAppendStreamRequestHandoffExecution", + "ConversationAppendStreamRequestTypedDict", + "ConversationEvents", + "ConversationEventsData", + "ConversationEventsDataTypedDict", + "ConversationEventsTypedDict", + "ConversationHistory", + "ConversationHistoryObject", + "ConversationHistoryTypedDict", + "ConversationInputs", + "ConversationInputsTypedDict", + "ConversationMessages", + "ConversationMessagesObject", + "ConversationMessagesTypedDict", + "ConversationRequest", + "ConversationRequestTypedDict", + "ConversationResponse", + "ConversationResponseObject", + "ConversationResponseTypedDict", + "ConversationRestartRequest", + "ConversationRestartRequestHandoffExecution", + "ConversationRestartRequestTypedDict", + "ConversationRestartStreamRequest", + "ConversationRestartStreamRequestHandoffExecution", + "ConversationRestartStreamRequestTypedDict", + "ConversationStreamRequest", + "ConversationStreamRequestHandoffExecution", + "ConversationStreamRequestTools", + "ConversationStreamRequestToolsTypedDict", + "ConversationStreamRequestTypedDict", + "ConversationUsageInfo", + "ConversationUsageInfoTypedDict", "Data", "DataTypedDict", "DeleteFileOut", @@ -585,6 +957,9 @@ "DeltaMessage", "DeltaMessageTypedDict", "Document", + "DocumentLibraryTool", + "DocumentLibraryToolType", + "DocumentLibraryToolTypedDict", "DocumentTypedDict", "DocumentURLChunk", "DocumentURLChunkType", @@ -597,6 +972,8 @@ "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", + "Entries", + "EntriesTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", @@ -638,9 +1015,25 @@ 
"FinishReason", "Function", "FunctionCall", + "FunctionCallEntry", + "FunctionCallEntryArguments", + "FunctionCallEntryArgumentsTypedDict", + "FunctionCallEntryObject", + "FunctionCallEntryType", + "FunctionCallEntryTypedDict", + "FunctionCallEvent", + "FunctionCallEventType", + "FunctionCallEventTypedDict", "FunctionCallTypedDict", "FunctionName", "FunctionNameTypedDict", + "FunctionResultEntry", + "FunctionResultEntryObject", + "FunctionResultEntryType", + "FunctionResultEntryTypedDict", + "FunctionTool", + "FunctionToolType", + "FunctionToolTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInType", @@ -650,8 +1043,12 @@ "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "HandoffExecution", "Hyperparameters", "HyperparametersTypedDict", + "ImageGenerationTool", + "ImageGenerationToolType", + "ImageGenerationToolTypedDict", "ImageURL", "ImageURLChunk", "ImageURLChunkImageURL", @@ -659,6 +1056,8 @@ "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", + "InputEntries", + "InputEntriesTypedDict", "Inputs", "InputsTypedDict", "InstructRequest", @@ -724,12 +1123,42 @@ "ListFilesOutTypedDict", "Loc", "LocTypedDict", + "MessageEntries", + "MessageEntriesTypedDict", + "MessageInputContentChunks", + "MessageInputContentChunksTypedDict", + "MessageInputEntry", + "MessageInputEntryContent", + "MessageInputEntryContentTypedDict", + "MessageInputEntryRole", + "MessageInputEntryType", + "MessageInputEntryTypedDict", + "MessageOutputContentChunks", + "MessageOutputContentChunksTypedDict", + "MessageOutputEntry", + "MessageOutputEntryContent", + "MessageOutputEntryContentTypedDict", + "MessageOutputEntryObject", + "MessageOutputEntryRole", + "MessageOutputEntryType", + "MessageOutputEntryTypedDict", + "MessageOutputEvent", + "MessageOutputEventContent", + "MessageOutputEventContentTypedDict", + "MessageOutputEventRole", + "MessageOutputEventType", + "MessageOutputEventTypedDict", "Messages", 
"MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", + "ModelConversation", + "ModelConversationObject", + "ModelConversationTools", + "ModelConversationToolsTypedDict", + "ModelConversationTypedDict", "ModelList", "ModelListTypedDict", "ModelType", @@ -752,6 +1181,10 @@ "Object", "One", "OneTypedDict", + "OutputContentChunks", + "OutputContentChunksTypedDict", + "Outputs", + "OutputsTypedDict", "Prediction", "PredictionTypedDict", "QueryParamStatus", @@ -762,9 +1195,20 @@ "RepositoriesTypedDict", "Response1", "Response1TypedDict", + "ResponseBody", + "ResponseBodyTypedDict", + "ResponseDoneEvent", + "ResponseDoneEventType", + "ResponseDoneEventTypedDict", + "ResponseErrorEvent", + "ResponseErrorEventType", + "ResponseErrorEventTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", + "ResponseStartedEvent", + "ResponseStartedEventType", + "ResponseStartedEventTypedDict", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", @@ -773,6 +1217,7 @@ "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", "Role", "SDKError", + "SSETypes", "SampleType", "Security", "SecurityTypedDict", @@ -793,13 +1238,31 @@ "ToolChoice", "ToolChoiceEnum", "ToolChoiceTypedDict", + "ToolExecutionDoneEvent", + "ToolExecutionDoneEventType", + "ToolExecutionDoneEventTypedDict", + "ToolExecutionEntry", + "ToolExecutionEntryObject", + "ToolExecutionEntryType", + "ToolExecutionEntryTypedDict", + "ToolExecutionStartedEvent", + "ToolExecutionStartedEventType", + "ToolExecutionStartedEventTypedDict", + "ToolFileChunk", + "ToolFileChunkType", + "ToolFileChunkTypedDict", "ToolMessage", "ToolMessageContent", "ToolMessageContentTypedDict", "ToolMessageRole", "ToolMessageTypedDict", + "ToolReferenceChunk", + "ToolReferenceChunkType", + "ToolReferenceChunkTypedDict", "ToolTypedDict", "ToolTypes", + "Tools", + "ToolsTypedDict", "TrainingFile", "TrainingFileTypedDict", 
"Two", @@ -827,4 +1290,10 @@ "WandbIntegrationOutTypedDict", "WandbIntegrationType", "WandbIntegrationTypedDict", + "WebSearchPremiumTool", + "WebSearchPremiumToolType", + "WebSearchPremiumToolTypedDict", + "WebSearchTool", + "WebSearchToolType", + "WebSearchToolTypedDict", ] diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py new file mode 100644 index 00000000..ce750606 --- /dev/null +++ b/src/mistralai/models/agent.py @@ -0,0 +1,129 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentToolsTypedDict = TypeAliasType( + "AgentToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + 
Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +AgentObject = Literal["agent"] + + +class AgentTypedDict(TypedDict): + model: str + name: str + id: str + version: int + created_at: datetime + updated_at: datetime + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + object: NotRequired[AgentObject] + + +class Agent(BaseModel): + model: str + + name: str + + id: str + + version: int + + created_at: datetime + + updated_at: datetime + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + object: Optional[AgentObject] = "agent" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "object", + ] + nullable_fields = ["instructions", "description", "handoffs"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + 
self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py new file mode 100644 index 00000000..66d6d9f5 --- /dev/null +++ b/src/mistralai/models/agentconversation.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentConversationObject = Literal["conversation"] + + +class AgentConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + agent_id: str + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + object: NotRequired[AgentConversationObject] + + +class AgentConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + agent_id: str + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + object: Optional[AgentConversationObject] = "conversation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description", "object"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agentcreationrequest.py b/src/mistralai/models/agentcreationrequest.py new file mode 100644 index 00000000..7e0a1fa2 --- /dev/null +++ b/src/mistralai/models/agentcreationrequest.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentCreationRequestToolsTypedDict = TypeAliasType( + "AgentCreationRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentCreationRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, 
Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class AgentCreationRequestTypedDict(TypedDict): + model: str + name: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentCreationRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + + +class AgentCreationRequest(BaseModel): + model: str + + name: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentCreationRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + ] + nullable_fields = ["instructions", "description", "handoffs"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + 
self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agenthandoffdoneevent.py b/src/mistralai/models/agenthandoffdoneevent.py new file mode 100644 index 00000000..fa545a02 --- /dev/null +++ b/src/mistralai/models/agenthandoffdoneevent.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffDoneEventType = Literal["agent.handoff.done"] + + +class AgentHandoffDoneEventTypedDict(TypedDict): + id: str + next_agent_id: str + next_agent_name: str + type: NotRequired[AgentHandoffDoneEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffDoneEvent(BaseModel): + id: str + + next_agent_id: str + + next_agent_name: str + + type: Optional[AgentHandoffDoneEventType] = "agent.handoff.done" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/models/agenthandoffentry.py b/src/mistralai/models/agenthandoffentry.py new file mode 100644 index 00000000..b8e356c9 --- /dev/null +++ b/src/mistralai/models/agenthandoffentry.py @@ -0,0 +1,75 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffEntryObject = Literal["entry"] + +AgentHandoffEntryType = Literal["agent.handoff"] + + +class AgentHandoffEntryTypedDict(TypedDict): + previous_agent_id: str + previous_agent_name: str + next_agent_id: str + next_agent_name: str + object: NotRequired[AgentHandoffEntryObject] + type: NotRequired[AgentHandoffEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class AgentHandoffEntry(BaseModel): + previous_agent_id: str + + previous_agent_name: str + + next_agent_id: str + + next_agent_name: str + + object: Optional[AgentHandoffEntryObject] = "entry" + + type: Optional[AgentHandoffEntryType] = "agent.handoff" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agenthandoffstartedevent.py b/src/mistralai/models/agenthandoffstartedevent.py 
new file mode 100644 index 00000000..9033a0a9 --- /dev/null +++ b/src/mistralai/models/agenthandoffstartedevent.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffStartedEventType = Literal["agent.handoff.started"] + + +class AgentHandoffStartedEventTypedDict(TypedDict): + id: str + previous_agent_id: str + previous_agent_name: str + type: NotRequired[AgentHandoffStartedEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffStartedEvent(BaseModel): + id: str + + previous_agent_id: str + + previous_agent_name: str + + type: Optional[AgentHandoffStartedEventType] = "agent.handoff.started" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/models/agents_api_v1_agents_getop.py b/src/mistralai/models/agents_api_v1_agents_getop.py new file mode 100644 index 00000000..5dbcecc1 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_getop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsGetRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/agents_api_v1_agents_listop.py b/src/mistralai/models/agents_api_v1_agents_listop.py new file mode 100644 index 00000000..25f48a62 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_listop.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, QueryParamMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + + +class AgentsAPIV1AgentsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 20 diff --git a/src/mistralai/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/models/agents_api_v1_agents_update_versionop.py new file mode 100644 index 00000000..5e4b97b3 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_update_versionop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): + agent_id: str + version: int + + +class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/models/agents_api_v1_agents_updateop.py b/src/mistralai/models/agents_api_v1_agents_updateop.py new file mode 100644 index 00000000..32696fbe --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_updateop.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): + agent_id: str + agent_update_request: AgentUpdateRequestTypedDict + + +class AgentsAPIV1AgentsUpdateRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + agent_update_request: Annotated[ + AgentUpdateRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/models/agents_api_v1_conversations_append_streamop.py new file mode 100644 index 00000000..d2489ffb --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_append_streamop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_stream_request: ConversationAppendStreamRequestTypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_stream_request: Annotated[ + ConversationAppendStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_appendop.py b/src/mistralai/models/agents_api_v1_conversations_appendop.py new file mode 100644 index 00000000..ba37697e --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_appendop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_request: ConversationAppendRequestTypedDict + + +class AgentsAPIV1ConversationsAppendRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_request: Annotated[ + ConversationAppendRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_getop.py b/src/mistralai/models/agents_api_v1_conversations_getop.py new file mode 100644 index 00000000..4a800ad6 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_getop.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): + conversation_id: str + + +class AgentsAPIV1ConversationsGetRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + +AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) +r"""Successful Response""" + + +AgentsAPIV1ConversationsGetResponseV1ConversationsGet = TypeAliasType( + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", + Union[AgentConversation, ModelConversation], +) +r"""Successful Response""" diff --git a/src/mistralai/models/agents_api_v1_conversations_historyop.py b/src/mistralai/models/agents_api_v1_conversations_historyop.py new file mode 100644 index 00000000..09fb6081 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_historyop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): + conversation_id: str + + +class AgentsAPIV1ConversationsHistoryRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_listop.py b/src/mistralai/models/agents_api_v1_conversations_listop.py new file mode 100644 index 00000000..f1d3d579 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_listop.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, QueryParamMetadata +from typing import Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + + +class AgentsAPIV1ConversationsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + +ResponseBodyTypedDict = TypeAliasType( + "ResponseBodyTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) + + +ResponseBody = TypeAliasType( + "ResponseBody", Union[AgentConversation, ModelConversation] +) diff --git a/src/mistralai/models/agents_api_v1_conversations_messagesop.py 
b/src/mistralai/models/agents_api_v1_conversations_messagesop.py new file mode 100644 index 00000000..ade66e5e --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_messagesop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): + conversation_id: str + + +class AgentsAPIV1ConversationsMessagesRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py new file mode 100644 index 00000000..c8fd8475 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): + conversation_id: str + conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + conversation_restart_stream_request: Annotated[ + ConversationRestartStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_restartop.py b/src/mistralai/models/agents_api_v1_conversations_restartop.py new file mode 100644 index 00000000..aa867aff --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_restartop.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): + conversation_id: str + conversation_restart_request: ConversationRestartRequestTypedDict + + +class AgentsAPIV1ConversationsRestartRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + conversation_restart_request: Annotated[ + ConversationRestartRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agentupdaterequest.py b/src/mistralai/models/agentupdaterequest.py new file mode 100644 index 00000000..ebb656d6 --- /dev/null +++ b/src/mistralai/models/agentupdaterequest.py @@ -0,0 +1,111 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentUpdateRequestToolsTypedDict = TypeAliasType( + "AgentUpdateRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentUpdateRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class AgentUpdateRequestTypedDict(TypedDict): + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: 
NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + model: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + + +class AgentUpdateRequest(BaseModel): + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentUpdateRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + model: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "model", + "name", + "description", + "handoffs", + ] + nullable_fields = ["instructions", "model", "name", "description", "handoffs"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/builtinconnectors.py b/src/mistralai/models/builtinconnectors.py new file mode 100644 index 00000000..6a3b2476 --- /dev/null +++ b/src/mistralai/models/builtinconnectors.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +BuiltInConnectors = Literal[ + "web_search", + "web_search_premium", + "code_interpreter", + "image_generation", + "document_library", +] diff --git a/src/mistralai/models/codeinterpretertool.py b/src/mistralai/models/codeinterpretertool.py new file mode 100644 index 00000000..b0fc4d20 --- /dev/null +++ b/src/mistralai/models/codeinterpretertool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +CodeInterpreterToolType = Literal["code_interpreter"] + + +class CodeInterpreterToolTypedDict(TypedDict): + type: NotRequired[CodeInterpreterToolType] + + +class CodeInterpreterTool(BaseModel): + type: Optional[CodeInterpreterToolType] = "code_interpreter" diff --git a/src/mistralai/models/completionargs.py b/src/mistralai/models/completionargs.py new file mode 100644 index 00000000..2c5cf213 --- /dev/null +++ b/src/mistralai/models/completionargs.py @@ -0,0 +1,100 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .toolchoiceenum import ToolChoiceEnum +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionArgsTypedDict(TypedDict): + r"""White-listed arguments from the completion API""" + + stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] + presence_penalty: NotRequired[Nullable[float]] + frequency_penalty: NotRequired[Nullable[float]] + temperature: NotRequired[float] + top_p: NotRequired[Nullable[float]] + max_tokens: NotRequired[Nullable[int]] + random_seed: NotRequired[Nullable[int]] + prediction: NotRequired[Nullable[PredictionTypedDict]] + response_format: NotRequired[Nullable[ResponseFormatTypedDict]] + tool_choice: NotRequired[ToolChoiceEnum] + + +class CompletionArgs(BaseModel): + r"""White-listed arguments from the completion API""" + + stop: OptionalNullable[CompletionArgsStop] = UNSET + + presence_penalty: OptionalNullable[float] = UNSET + + frequency_penalty: OptionalNullable[float] = UNSET + + temperature: Optional[float] = 0.3 + + top_p: OptionalNullable[float] = UNSET + + max_tokens: OptionalNullable[int] = UNSET + + random_seed: OptionalNullable[int] = UNSET + + prediction: OptionalNullable[Prediction] = UNSET + + response_format: OptionalNullable[ResponseFormat] = UNSET + + tool_choice: Optional[ToolChoiceEnum] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + "tool_choice", + ] + nullable_fields = [ + "stop", 
+ "presence_penalty", + "frequency_penalty", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/completionargsstop.py b/src/mistralai/models/completionargsstop.py new file mode 100644 index 00000000..de7a0956 --- /dev/null +++ b/src/mistralai/models/completionargsstop.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import List, Union +from typing_extensions import TypeAliasType + + +CompletionArgsStopTypedDict = TypeAliasType( + "CompletionArgsStopTypedDict", Union[str, List[str]] +) + + +CompletionArgsStop = TypeAliasType("CompletionArgsStop", Union[str, List[str]]) diff --git a/src/mistralai/models/completionjobout.py b/src/mistralai/models/completionjobout.py index 7f8bfd91..3932dae3 100644 --- a/src/mistralai/models/completionjobout.py +++ b/src/mistralai/models/completionjobout.py @@ -28,7 +28,7 @@ ] r"""The current status of the fine-tuning job.""" -Object = Literal["job"] +CompletionJobOutObject = Literal["job"] r"""The object type of the fine-tuning job.""" IntegrationsTypedDict = WandbIntegrationOutTypedDict @@ -63,7 +63,7 @@ class CompletionJobOutTypedDict(TypedDict): hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain 
validation data.""" - object: NotRequired[Object] + object: NotRequired[CompletionJobOutObject] r"""The object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" @@ -105,7 +105,7 @@ class CompletionJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - object: Optional[Object] = "job" + object: Optional[CompletionJobOutObject] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/conversationappendrequest.py b/src/mistralai/models/conversationappendrequest.py new file mode 100644 index 00000000..ecc47e45 --- /dev/null +++ b/src/mistralai/models/conversationappendrequest.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendRequestHandoffExecution = Literal["client", "server"] + + +class ConversationAppendRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationAppendRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our 
servers or not.""" + + handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationappendstreamrequest.py b/src/mistralai/models/conversationappendstreamrequest.py new file mode 100644 index 00000000..25ffe5fb --- /dev/null +++ b/src/mistralai/models/conversationappendstreamrequest.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendStreamRequestHandoffExecution = Literal["client", "server"] + + +class ConversationAppendStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationAppendStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationevents.py b/src/mistralai/models/conversationevents.py new file mode 100644 index 00000000..8552edda --- /dev/null +++ 
b/src/mistralai/models/conversationevents.py @@ -0,0 +1,72 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict +from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventTypedDict, +) +from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict +from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict +from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict +from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict +from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict +from .ssetypes import SSETypes +from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventTypedDict, +) +from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ConversationEventsDataTypedDict = TypeAliasType( + "ConversationEventsDataTypedDict", + Union[ + ResponseStartedEventTypedDict, + ResponseDoneEventTypedDict, + ResponseErrorEventTypedDict, + ToolExecutionStartedEventTypedDict, + ToolExecutionDoneEventTypedDict, + AgentHandoffStartedEventTypedDict, + AgentHandoffDoneEventTypedDict, + FunctionCallEventTypedDict, + MessageOutputEventTypedDict, + ], +) + + +ConversationEventsData = Annotated[ + Union[ + Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], + Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], + Annotated[ResponseDoneEvent, Tag("conversation.response.done")], + Annotated[ResponseErrorEvent, Tag("conversation.response.error")], + 
Annotated[ResponseStartedEvent, Tag("conversation.response.started")], + Annotated[FunctionCallEvent, Tag("function.call.delta")], + Annotated[MessageOutputEvent, Tag("message.output.delta")], + Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], + Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ConversationEventsTypedDict(TypedDict): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + data: ConversationEventsDataTypedDict + + +class ConversationEvents(BaseModel): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + + data: ConversationEventsData diff --git a/src/mistralai/models/conversationhistory.py b/src/mistralai/models/conversationhistory.py new file mode 100644 index 00000000..d07d7297 --- /dev/null +++ b/src/mistralai/models/conversationhistory.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationHistoryObject = Literal["conversation.history"] + +EntriesTypedDict = TypeAliasType( + "EntriesTypedDict", + Union[ + MessageInputEntryTypedDict, + FunctionResultEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +Entries = TypeAliasType( + "Entries", + Union[ + MessageInputEntry, + FunctionResultEntry, + ToolExecutionEntry, + FunctionCallEntry, + MessageOutputEntry, + AgentHandoffEntry, + ], +) + + +class ConversationHistoryTypedDict(TypedDict): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + entries: List[EntriesTypedDict] + object: NotRequired[ConversationHistoryObject] + + +class ConversationHistory(BaseModel): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + + entries: List[Entries] + + object: Optional[ConversationHistoryObject] = "conversation.history" diff --git a/src/mistralai/models/conversationinputs.py b/src/mistralai/models/conversationinputs.py new file mode 100644 index 00000000..4d30cd76 --- /dev/null +++ b/src/mistralai/models/conversationinputs.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .inputentries import InputEntries, InputEntriesTypedDict +from typing import List, Union +from typing_extensions import TypeAliasType + + +ConversationInputsTypedDict = TypeAliasType( + "ConversationInputsTypedDict", Union[str, List[InputEntriesTypedDict]] +) + + +ConversationInputs = TypeAliasType("ConversationInputs", Union[str, List[InputEntries]]) diff --git a/src/mistralai/models/conversationmessages.py b/src/mistralai/models/conversationmessages.py new file mode 100644 index 00000000..9027045b --- /dev/null +++ b/src/mistralai/models/conversationmessages.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .messageentries import MessageEntries, MessageEntriesTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationMessagesObject = Literal["conversation.messages"] + + +class ConversationMessagesTypedDict(TypedDict): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + messages: List[MessageEntriesTypedDict] + object: NotRequired[ConversationMessagesObject] + + +class ConversationMessages(BaseModel): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + + messages: List[MessageEntries] + + object: Optional[ConversationMessagesObject] = "conversation.messages" diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py new file mode 100644 index 00000000..48cc6fe7 --- /dev/null +++ b/src/mistralai/models/conversationrequest.py @@ -0,0 +1,133 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +HandoffExecution = Literal["client", "server"] + +ToolsTypedDict = TypeAliasType( + "ToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +Tools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ConversationRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] + handoff_execution: NotRequired[Nullable[HandoffExecution]] + instructions: NotRequired[Nullable[str]] + 
tools: NotRequired[Nullable[List[ToolsTypedDict]]] + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + + +class ConversationRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[HandoffExecution] = UNSET + + instructions: OptionalNullable[str] = UNSET + + tools: OptionalNullable[List[Tools]] = UNSET + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "agent_id", + "model", + ] + nullable_fields = [ + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "agent_id", + "model", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/conversationresponse.py b/src/mistralai/models/conversationresponse.py new file mode 100644 index 00000000..61de8565 --- /dev/null +++ 
b/src/mistralai/models/conversationresponse.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationResponseObject = Literal["conversation.response"] + +OutputsTypedDict = TypeAliasType( + "OutputsTypedDict", + Union[ + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +Outputs = TypeAliasType( + "Outputs", + Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], +) + + +class ConversationResponseTypedDict(TypedDict): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + outputs: List[OutputsTypedDict] + usage: ConversationUsageInfoTypedDict + object: NotRequired[ConversationResponseObject] + + +class ConversationResponse(BaseModel): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + + outputs: List[Outputs] + + usage: ConversationUsageInfo + + object: Optional[ConversationResponseObject] = "conversation.response" diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py new file mode 100644 index 00000000..58376140 --- /dev/null +++ b/src/mistralai/models/conversationrestartrequest.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationRestartRequestHandoffExecution = Literal["client", "server"] + + +class ConversationRestartRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputsTypedDict + from_entry_id: str + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationRestartRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputs + + from_entry_id: str + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py new file mode 100644 index 00000000..f213aea3 --- /dev/null +++ b/src/mistralai/models/conversationrestartstreamrequest.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationRestartStreamRequestHandoffExecution = Literal["client", "server"] + + +class ConversationRestartStreamRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputsTypedDict + from_entry_id: str + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationRestartStreamRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputs + + from_entry_id: str + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py new file mode 100644 index 00000000..a1c21d9a --- /dev/null +++ b/src/mistralai/models/conversationstreamrequest.py @@ -0,0 +1,135 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationStreamRequestHandoffExecution = Literal["client", "server"] + +ConversationStreamRequestToolsTypedDict = TypeAliasType( + "ConversationStreamRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ConversationStreamRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ConversationStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] 
+ handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[Nullable[List[ConversationStreamRequestToolsTypedDict]]] + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + + +class ConversationStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[ConversationStreamRequestHandoffExecution] = ( + UNSET + ) + + instructions: OptionalNullable[str] = UNSET + + tools: OptionalNullable[List[ConversationStreamRequestTools]] = UNSET + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "agent_id", + "model", + ] + nullable_fields = [ + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "agent_id", + "model", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + 
+ return m diff --git a/src/mistralai/models/conversationusageinfo.py b/src/mistralai/models/conversationusageinfo.py new file mode 100644 index 00000000..44ffd5e5 --- /dev/null +++ b/src/mistralai/models/conversationusageinfo.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ConversationUsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + connector_tokens: NotRequired[Nullable[int]] + connectors: NotRequired[Nullable[Dict[str, int]]] + + +class ConversationUsageInfo(BaseModel): + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + connector_tokens: OptionalNullable[int] = UNSET + + connectors: OptionalNullable[Dict[str, int]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "connector_tokens", + "connectors", + ] + nullable_fields = ["connector_tokens", "connectors"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/documentlibrarytool.py 
b/src/mistralai/models/documentlibrarytool.py new file mode 100644 index 00000000..f36de710 --- /dev/null +++ b/src/mistralai/models/documentlibrarytool.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +DocumentLibraryToolType = Literal["document_library"] + + +class DocumentLibraryToolTypedDict(TypedDict): + library_ids: List[str] + r"""Ids of the library in which to search.""" + type: NotRequired[DocumentLibraryToolType] + + +class DocumentLibraryTool(BaseModel): + library_ids: List[str] + r"""Ids of the library in which to search.""" + + type: Optional[DocumentLibraryToolType] = "document_library" diff --git a/src/mistralai/models/functioncallentry.py b/src/mistralai/models/functioncallentry.py new file mode 100644 index 00000000..821e7c14 --- /dev/null +++ b/src/mistralai/models/functioncallentry.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, +) +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionCallEntryObject = Literal["entry"] + +FunctionCallEntryType = Literal["function.call"] + + +class FunctionCallEntryTypedDict(TypedDict): + tool_call_id: str + name: str + arguments: FunctionCallEntryArgumentsTypedDict + object: NotRequired[FunctionCallEntryObject] + type: NotRequired[FunctionCallEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionCallEntry(BaseModel): + tool_call_id: str + + name: str + + arguments: FunctionCallEntryArguments + + object: Optional[FunctionCallEntryObject] = "entry" + + type: Optional[FunctionCallEntryType] = "function.call" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git 
a/src/mistralai/models/functioncallentryarguments.py b/src/mistralai/models/functioncallentryarguments.py new file mode 100644 index 00000000..ac9e6227 --- /dev/null +++ b/src/mistralai/models/functioncallentryarguments.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType + + +FunctionCallEntryArgumentsTypedDict = TypeAliasType( + "FunctionCallEntryArgumentsTypedDict", Union[Dict[str, Any], str] +) + + +FunctionCallEntryArguments = TypeAliasType( + "FunctionCallEntryArguments", Union[Dict[str, Any], str] +) diff --git a/src/mistralai/models/functioncallevent.py b/src/mistralai/models/functioncallevent.py new file mode 100644 index 00000000..90b4b226 --- /dev/null +++ b/src/mistralai/models/functioncallevent.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionCallEventType = Literal["function.call.delta"] + + +class FunctionCallEventTypedDict(TypedDict): + id: str + name: str + tool_call_id: str + arguments: str + type: NotRequired[FunctionCallEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class FunctionCallEvent(BaseModel): + id: str + + name: str + + tool_call_id: str + + arguments: str + + type: Optional[FunctionCallEventType] = "function.call.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/models/functionresultentry.py b/src/mistralai/models/functionresultentry.py new file mode 100644 index 00000000..64040954 --- /dev/null +++ b/src/mistralai/models/functionresultentry.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionResultEntryObject = Literal["entry"] + +FunctionResultEntryType = Literal["function.result"] + + +class FunctionResultEntryTypedDict(TypedDict): + tool_call_id: str + result: str + object: NotRequired[FunctionResultEntryObject] + type: NotRequired[FunctionResultEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionResultEntry(BaseModel): + tool_call_id: str + + result: str + + object: Optional[FunctionResultEntryObject] = "entry" + + type: Optional[FunctionResultEntryType] = "function.result" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/functiontool.py b/src/mistralai/models/functiontool.py new file mode 100644 index 00000000..7ce5c464 --- /dev/null +++ b/src/mistralai/models/functiontool.py @@ -0,0 +1,21 @@ +"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionToolType = Literal["function"] + + +class FunctionToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[FunctionToolType] + + +class FunctionTool(BaseModel): + function: Function + + type: Optional[FunctionToolType] = "function" diff --git a/src/mistralai/models/imagegenerationtool.py b/src/mistralai/models/imagegenerationtool.py new file mode 100644 index 00000000..27bb2d12 --- /dev/null +++ b/src/mistralai/models/imagegenerationtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ImageGenerationToolType = Literal["image_generation"] + + +class ImageGenerationToolTypedDict(TypedDict): + type: NotRequired[ImageGenerationToolType] + + +class ImageGenerationTool(BaseModel): + type: Optional[ImageGenerationToolType] = "image_generation" diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/models/inputentries.py new file mode 100644 index 00000000..9c0fea6e --- /dev/null +++ b/src/mistralai/models/inputentries.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +InputEntriesTypedDict = TypeAliasType( + "InputEntriesTypedDict", + Union[MessageInputEntryTypedDict, FunctionResultEntryTypedDict], +) + + +InputEntries = TypeAliasType( + "InputEntries", Union[MessageInputEntry, FunctionResultEntry] +) diff --git a/src/mistralai/models/messageentries.py b/src/mistralai/models/messageentries.py new file mode 100644 index 00000000..9b1706de --- /dev/null +++ b/src/mistralai/models/messageentries.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageEntriesTypedDict = TypeAliasType( + "MessageEntriesTypedDict", + Union[MessageInputEntryTypedDict, MessageOutputEntryTypedDict], +) + + +MessageEntries = TypeAliasType( + "MessageEntries", Union[MessageInputEntry, MessageOutputEntry] +) diff --git a/src/mistralai/models/messageinputcontentchunks.py b/src/mistralai/models/messageinputcontentchunks.py new file mode 100644 index 00000000..47704211 --- /dev/null +++ b/src/mistralai/models/messageinputcontentchunks.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageInputContentChunksTypedDict = TypeAliasType( + "MessageInputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ToolFileChunkTypedDict, + ], +) + + +MessageInputContentChunks = TypeAliasType( + "MessageInputContentChunks", + Union[TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk], +) diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py new file mode 100644 index 00000000..3d642cdf --- /dev/null +++ b/src/mistralai/models/messageinputentry.py @@ -0,0 +1,89 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +Object = Literal["entry"] + +MessageInputEntryType = Literal["message.input"] + +MessageInputEntryRole = Literal["assistant", "user"] + +MessageInputEntryContentTypedDict = TypeAliasType( + "MessageInputEntryContentTypedDict", + Union[str, List[MessageInputContentChunksTypedDict]], +) + + +MessageInputEntryContent = TypeAliasType( + "MessageInputEntryContent", Union[str, List[MessageInputContentChunks]] +) + + +class MessageInputEntryTypedDict(TypedDict): + r"""Representation of an input message inside the conversation.""" + + role: MessageInputEntryRole + content: MessageInputEntryContentTypedDict + object: NotRequired[Object] + type: NotRequired[MessageInputEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class MessageInputEntry(BaseModel): + r"""Representation of an input message inside the conversation.""" + + role: MessageInputEntryRole + + content: MessageInputEntryContent + + object: Optional[Object] = "entry" + + type: Optional[MessageInputEntryType] = "message.input" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/messageoutputcontentchunks.py b/src/mistralai/models/messageoutputcontentchunks.py new file mode 100644 index 00000000..e83fb3a9 --- /dev/null +++ b/src/mistralai/models/messageoutputcontentchunks.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageOutputContentChunksTypedDict = TypeAliasType( + "MessageOutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +MessageOutputContentChunks = TypeAliasType( + "MessageOutputContentChunks", + Union[ + TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk + ], +) diff --git a/src/mistralai/models/messageoutputentry.py b/src/mistralai/models/messageoutputentry.py new file mode 100644 index 00000000..abb361e7 --- /dev/null +++ b/src/mistralai/models/messageoutputentry.py @@ -0,0 +1,100 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +MessageOutputEntryObject = Literal["entry"] + +MessageOutputEntryType = Literal["message.output"] + +MessageOutputEntryRole = Literal["assistant"] + +MessageOutputEntryContentTypedDict = TypeAliasType( + "MessageOutputEntryContentTypedDict", + Union[str, List[MessageOutputContentChunksTypedDict]], +) + + +MessageOutputEntryContent = TypeAliasType( + "MessageOutputEntryContent", Union[str, List[MessageOutputContentChunks]] +) + + +class MessageOutputEntryTypedDict(TypedDict): + content: MessageOutputEntryContentTypedDict + object: NotRequired[MessageOutputEntryObject] + type: NotRequired[MessageOutputEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + role: NotRequired[MessageOutputEntryRole] + + +class MessageOutputEntry(BaseModel): + content: MessageOutputEntryContent + + object: Optional[MessageOutputEntryObject] = "entry" + + type: Optional[MessageOutputEntryType] = "message.output" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + role: Optional[MessageOutputEntryRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "type", + "created_at", + "completed_at", + "id", + "agent_id", + "model", + "role", + ] + nullable_fields = ["completed_at", 
"agent_id", "model"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/messageoutputevent.py b/src/mistralai/models/messageoutputevent.py new file mode 100644 index 00000000..328874d6 --- /dev/null +++ b/src/mistralai/models/messageoutputevent.py @@ -0,0 +1,93 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +MessageOutputEventType = Literal["message.output.delta"] + +MessageOutputEventRole = Literal["assistant"] + +MessageOutputEventContentTypedDict = TypeAliasType( + "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] +) + + +MessageOutputEventContent = TypeAliasType( + "MessageOutputEventContent", Union[str, OutputContentChunks] +) + + +class MessageOutputEventTypedDict(TypedDict): + id: str + content: MessageOutputEventContentTypedDict + type: NotRequired[MessageOutputEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + content_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + role: 
NotRequired[MessageOutputEventRole] + + +class MessageOutputEvent(BaseModel): + id: str + + content: MessageOutputEventContent + + type: Optional[MessageOutputEventType] = "message.output.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + content_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + role: Optional[MessageOutputEventRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "type", + "created_at", + "output_index", + "content_index", + "model", + "agent_id", + "role", + ] + nullable_fields = ["model", "agent_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py new file mode 100644 index 00000000..3e927192 --- /dev/null +++ b/src/mistralai/models/modelconversation.py @@ -0,0 +1,127 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ModelConversationToolsTypedDict = TypeAliasType( + "ModelConversationToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ModelConversationTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +ModelConversationObject = Literal["conversation"] + + +class ModelConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + model: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: 
NotRequired[List[ModelConversationToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + object: NotRequired[ModelConversationObject] + + +class ModelConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + model: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[ModelConversationTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + object: Optional[ModelConversationObject] = "conversation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "name", + "description", + "object", + ] + nullable_fields = ["instructions", "name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + 
m[k] = val + + return m diff --git a/src/mistralai/models/outputcontentchunks.py b/src/mistralai/models/outputcontentchunks.py new file mode 100644 index 00000000..6b7e39ea --- /dev/null +++ b/src/mistralai/models/outputcontentchunks.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +OutputContentChunksTypedDict = TypeAliasType( + "OutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +OutputContentChunks = TypeAliasType( + "OutputContentChunks", + Union[ + TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk + ], +) diff --git a/src/mistralai/models/responsedoneevent.py b/src/mistralai/models/responsedoneevent.py new file mode 100644 index 00000000..296cb430 --- /dev/null +++ b/src/mistralai/models/responsedoneevent.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ResponseDoneEventType = Literal["conversation.response.done"] + + +class ResponseDoneEventTypedDict(TypedDict): + usage: ConversationUsageInfoTypedDict + type: NotRequired[ResponseDoneEventType] + created_at: NotRequired[datetime] + + +class ResponseDoneEvent(BaseModel): + usage: ConversationUsageInfo + + type: Optional[ResponseDoneEventType] = "conversation.response.done" + + created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responseerrorevent.py b/src/mistralai/models/responseerrorevent.py new file mode 100644 index 00000000..e4190d17 --- /dev/null +++ b/src/mistralai/models/responseerrorevent.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ResponseErrorEventType = Literal["conversation.response.error"] + + +class ResponseErrorEventTypedDict(TypedDict): + message: str + code: int + type: NotRequired[ResponseErrorEventType] + created_at: NotRequired[datetime] + + +class ResponseErrorEvent(BaseModel): + message: str + + code: int + + type: Optional[ResponseErrorEventType] = "conversation.response.error" + + created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responsestartedevent.py b/src/mistralai/models/responsestartedevent.py new file mode 100644 index 00000000..6acb483e --- /dev/null +++ b/src/mistralai/models/responsestartedevent.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ResponseStartedEventType = Literal["conversation.response.started"] + + +class ResponseStartedEventTypedDict(TypedDict): + conversation_id: str + type: NotRequired[ResponseStartedEventType] + created_at: NotRequired[datetime] + + +class ResponseStartedEvent(BaseModel): + conversation_id: str + + type: Optional[ResponseStartedEventType] = "conversation.response.started" + + created_at: Optional[datetime] = None diff --git a/src/mistralai/models/ssetypes.py b/src/mistralai/models/ssetypes.py new file mode 100644 index 00000000..4d15b4f1 --- /dev/null +++ b/src/mistralai/models/ssetypes.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +SSETypes = Literal[ + "conversation.response.started", + "conversation.response.done", + "conversation.response.error", + "message.output.delta", + "tool.execution.started", + "tool.execution.done", + "agent.handoff.started", + "agent.handoff.done", + "function.call.delta", +] +r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/models/toolexecutiondoneevent.py b/src/mistralai/models/toolexecutiondoneevent.py new file mode 100644 index 00000000..c73d943a --- /dev/null +++ b/src/mistralai/models/toolexecutiondoneevent.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.types import BaseModel +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolExecutionDoneEventType = Literal["tool.execution.done"] + + +class ToolExecutionDoneEventTypedDict(TypedDict): + id: str + name: BuiltInConnectors + type: NotRequired[ToolExecutionDoneEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + info: NotRequired[Dict[str, Any]] + + +class ToolExecutionDoneEvent(BaseModel): + id: str + + name: BuiltInConnectors + + type: Optional[ToolExecutionDoneEventType] = "tool.execution.done" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + info: Optional[Dict[str, Any]] = None diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py new file mode 100644 index 00000000..20c9bf19 --- /dev/null +++ b/src/mistralai/models/toolexecutionentry.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolExecutionEntryObject = Literal["entry"] + +ToolExecutionEntryType = Literal["tool.execution"] + + +class ToolExecutionEntryTypedDict(TypedDict): + name: BuiltInConnectors + object: NotRequired[ToolExecutionEntryObject] + type: NotRequired[ToolExecutionEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + info: NotRequired[Dict[str, Any]] + + +class ToolExecutionEntry(BaseModel): + name: BuiltInConnectors + + object: Optional[ToolExecutionEntryObject] = "entry" + + type: Optional[ToolExecutionEntryType] = "tool.execution" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + info: Optional[Dict[str, Any]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id", "info"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/toolexecutionstartedevent.py 
b/src/mistralai/models/toolexecutionstartedevent.py new file mode 100644 index 00000000..e140665e --- /dev/null +++ b/src/mistralai/models/toolexecutionstartedevent.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolExecutionStartedEventType = Literal["tool.execution.started"] + + +class ToolExecutionStartedEventTypedDict(TypedDict): + id: str + name: BuiltInConnectors + type: NotRequired[ToolExecutionStartedEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class ToolExecutionStartedEvent(BaseModel): + id: str + + name: BuiltInConnectors + + type: Optional[ToolExecutionStartedEventType] = "tool.execution.started" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/models/toolfilechunk.py b/src/mistralai/models/toolfilechunk.py new file mode 100644 index 00000000..1d28e2db --- /dev/null +++ b/src/mistralai/models/toolfilechunk.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolFileChunkType = Literal["tool_file"] + + +class ToolFileChunkTypedDict(TypedDict): + tool: BuiltInConnectors + file_id: str + type: NotRequired[ToolFileChunkType] + file_name: NotRequired[Nullable[str]] + file_type: NotRequired[Nullable[str]] + + +class ToolFileChunk(BaseModel): + tool: BuiltInConnectors + + file_id: str + + type: Optional[ToolFileChunkType] = "tool_file" + + file_name: OptionalNullable[str] = UNSET + + file_type: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "file_name", "file_type"] + nullable_fields = ["file_name", "file_type"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py new file mode 100644 index 00000000..84f72696 --- /dev/null +++ b/src/mistralai/models/toolreferencechunk.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolReferenceChunkType = Literal["tool_reference"] + + +class ToolReferenceChunkTypedDict(TypedDict): + tool: BuiltInConnectors + title: str + type: NotRequired[ToolReferenceChunkType] + url: NotRequired[Nullable[str]] + source: NotRequired[Nullable[str]] + + +class ToolReferenceChunk(BaseModel): + tool: BuiltInConnectors + + title: str + + type: Optional[ToolReferenceChunkType] = "tool_reference" + + url: OptionalNullable[str] = UNSET + + source: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "url", "source"] + nullable_fields = ["url", "source"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/websearchpremiumtool.py b/src/mistralai/models/websearchpremiumtool.py new file mode 100644 index 00000000..70fc5626 --- /dev/null +++ b/src/mistralai/models/websearchpremiumtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WebSearchPremiumToolType = Literal["web_search_premium"] + + +class WebSearchPremiumToolTypedDict(TypedDict): + type: NotRequired[WebSearchPremiumToolType] + + +class WebSearchPremiumTool(BaseModel): + type: Optional[WebSearchPremiumToolType] = "web_search_premium" diff --git a/src/mistralai/models/websearchtool.py b/src/mistralai/models/websearchtool.py new file mode 100644 index 00000000..3dfd1c53 --- /dev/null +++ b/src/mistralai/models/websearchtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WebSearchToolType = Literal["web_search"] + + +class WebSearchToolTypedDict(TypedDict): + type: NotRequired[WebSearchToolType] + + +class WebSearchTool(BaseModel): + type: Optional[WebSearchToolType] = "web_search" diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index e801eaf3..5414436d 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -10,6 +10,7 @@ from mistralai._hooks import SDKHooks from mistralai.agents import Agents from mistralai.batch import Batch +from mistralai.beta import Beta from mistralai.chat import Chat from mistralai.classifiers import Classifiers from mistralai.embeddings import Embeddings @@ -28,6 +29,7 @@ class Mistral(BaseSDK): models: Models r"""Model Management API""" + beta: Beta files: Files r"""Files API""" fine_tuning: FineTuning @@ -142,6 +144,7 @@ def __init__( def _init_sdks(self): self.models = Models(self.sdk_configuration) + self.beta = Beta(self.sdk_configuration) self.files = Files(self.sdk_configuration) self.fine_tuning = FineTuning(self.sdk_configuration) self.batch = Batch(self.sdk_configuration)