From 76559edce0cf2d4d64ebf87480fb32f81e4bb579 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 16 Apr 2025 18:36:30 +0000 Subject: [PATCH 1/3] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 --- .speakeasy/gen.lock | 127 +++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +- README.md | 2 + RELEASES.md | 12 +- docs/models/chatclassificationrequest.md | 9 + .../models/chatclassificationrequestinputs.md | 19 + docs/models/chatmoderationrequest.md | 3 +- docs/models/classificationobject.md | 9 - docs/models/classificationresponse.md | 10 +- docs/models/classificationtargetresult.md | 8 + ...djobout.md => classifierdetailedjobout.md} | 14 +- .../classifierdetailedjoboutintegrations.md | 11 + .../models/classifierdetailedjoboutjobtype.md | 8 + ...t.md => classifierdetailedjoboutobject.md} | 2 +- ...s.md => classifierdetailedjoboutstatus.md} | 2 +- docs/models/classifierftmodelout.md | 21 + docs/models/classifierftmodeloutmodeltype.md | 8 + ...bject.md => classifierftmodeloutobject.md} | 2 +- docs/models/classifierjobout.md | 23 + ...ons.md => classifierjoboutintegrations.md} | 2 +- docs/models/classifierjoboutjobtype.md | 10 + docs/models/classifierjoboutobject.md | 10 + docs/models/classifierjoboutstatus.md | 19 + docs/models/classifiertargetin.md | 11 + docs/models/classifiertargetout.md | 11 + docs/models/classifiertrainingparameters.md | 13 + docs/models/classifiertrainingparametersin.md | 15 + docs/models/completiondetailedjobout.md | 26 + .../completiondetailedjoboutintegrations.md | 11 + .../models/completiondetailedjoboutjobtype.md | 8 + docs/models/completiondetailedjoboutobject.md | 8 + ...> completiondetailedjoboutrepositories.md} | 2 +- docs/models/completiondetailedjoboutstatus.md | 17 + docs/models/completionftmodelout.md | 20 + docs/models/completionftmodeloutobject.md | 8 + .../models/{jobout.md => completionjobout.md} | 10 +- ...ers.md => completiontrainingparameters.md} | 6 +- ...n.md => completiontrainingparametersin.md} | 6 
+- docs/models/finetuneablemodeltype.md | 9 + docs/models/ftclassifierlossfunction.md | 9 + docs/models/ftmodelcapabilitiesout.md | 3 +- docs/models/ftmodelout.md | 19 - docs/models/hyperparameters.md | 17 + docs/models/instructrequest.md | 8 + docs/models/instructrequestmessages.md | 29 ++ docs/models/jobin.md | 7 +- .../jobsapiroutesbatchgetbatchjobsrequest.md | 18 +- ...esfinetuningcancelfinetuningjobresponse.md | 19 + ...esfinetuningcreatefinetuningjobresponse.md | 4 +- ...outesfinetuninggetfinetuningjobresponse.md | 19 + ...outesfinetuninggetfinetuningjobsrequest.md | 1 + ...tesfinetuningstartfinetuningjobresponse.md | 19 + ...sfinetuningupdatefinetunedmodelresponse.md | 19 + docs/models/jobsout.md | 2 +- docs/models/jobsoutdata.md | 17 + docs/models/jobtype.md | 10 + docs/models/modeltype.md | 8 + docs/models/moderationobject.md | 9 + docs/models/moderationresponse.md | 10 + docs/models/response1.md | 17 + docs/models/wandbintegrationout.md | 3 +- docs/sdks/classifiers/README.md | 106 +++- docs/sdks/jobs/README.md | 18 +- docs/sdks/mistraljobs/README.md | 20 +- docs/sdks/models/README.md | 2 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- src/mistralai/classifiers.py | 460 +++++++++++++++++- src/mistralai/jobs.py | 122 +++-- src/mistralai/mistral_jobs.py | 4 +- src/mistralai/models/__init__.py | 231 +++++++-- src/mistralai/models/archiveftmodelout.py | 14 +- src/mistralai/models/batchjobout.py | 12 +- src/mistralai/models/batchjobsout.py | 12 +- .../models/chatclassificationrequest.py | 22 + .../models/chatclassificationrequestinputs.py | 19 + src/mistralai/models/chatmoderationrequest.py | 11 +- .../models/classificationresponse.py | 21 +- .../models/classificationtargetresult.py | 14 + .../models/classifierdetailedjobout.py | 156 ++++++ src/mistralai/models/classifierftmodelout.py | 101 ++++ src/mistralai/models/classifierjobout.py | 165 +++++++ src/mistralai/models/classifiertargetin.py | 55 +++ src/mistralai/models/classifiertargetout.py | 24 + 
.../models/classifiertrainingparameters.py | 73 +++ .../models/classifiertrainingparametersin.py | 85 ++++ ...djobout.py => completiondetailedjobout.py} | 68 +-- ...{ftmodelout.py => completionftmodelout.py} | 24 +- .../models/{jobout.py => completionjobout.py} | 49 +- ...ers.py => completiontrainingparameters.py} | 14 +- ...n.py => completiontrainingparametersin.py} | 14 +- src/mistralai/models/finetuneablemodeltype.py | 7 + .../models/ftclassifierlossfunction.py | 7 + .../models/ftmodelcapabilitiesout.py | 3 + src/mistralai/models/githubrepositoryin.py | 14 +- src/mistralai/models/githubrepositoryout.py | 14 +- src/mistralai/models/instructrequest.py | 42 ++ src/mistralai/models/jobin.py | 64 ++- .../jobs_api_routes_batch_get_batch_jobsop.py | 6 +- ...es_fine_tuning_cancel_fine_tuning_jobop.py | 31 +- ...es_fine_tuning_create_fine_tuning_jobop.py | 25 +- ...outes_fine_tuning_get_fine_tuning_jobop.py | 31 +- ...utes_fine_tuning_get_fine_tuning_jobsop.py | 8 + ...tes_fine_tuning_start_fine_tuning_jobop.py | 31 +- ...s_fine_tuning_update_fine_tuned_modelop.py | 30 +- src/mistralai/models/jobsout.py | 37 +- src/mistralai/models/legacyjobmetadataout.py | 15 +- ...ificationobject.py => moderationobject.py} | 12 +- src/mistralai/models/moderationresponse.py | 21 + src/mistralai/models/unarchiveftmodelout.py | 14 +- src/mistralai/models/wandbintegration.py | 14 +- src/mistralai/models/wandbintegrationout.py | 21 +- src/mistralai/models_.py | 14 +- 114 files changed, 2688 insertions(+), 488 deletions(-) create mode 100644 docs/models/chatclassificationrequest.md create mode 100644 docs/models/chatclassificationrequestinputs.md delete mode 100644 docs/models/classificationobject.md create mode 100644 docs/models/classificationtargetresult.md rename docs/models/{detailedjobout.md => classifierdetailedjobout.md} (92%) create mode 100644 docs/models/classifierdetailedjoboutintegrations.md create mode 100644 docs/models/classifierdetailedjoboutjobtype.md rename 
docs/models/{detailedjoboutobject.md => classifierdetailedjoboutobject.md} (66%) rename docs/models/{detailedjoboutstatus.md => classifierdetailedjoboutstatus.md} (95%) create mode 100644 docs/models/classifierftmodelout.md create mode 100644 docs/models/classifierftmodeloutmodeltype.md rename docs/models/{ftmodeloutobject.md => classifierftmodeloutobject.md} (72%) create mode 100644 docs/models/classifierjobout.md rename docs/models/{detailedjoboutintegrations.md => classifierjoboutintegrations.md} (80%) create mode 100644 docs/models/classifierjoboutjobtype.md create mode 100644 docs/models/classifierjoboutobject.md create mode 100644 docs/models/classifierjoboutstatus.md create mode 100644 docs/models/classifiertargetin.md create mode 100644 docs/models/classifiertargetout.md create mode 100644 docs/models/classifiertrainingparameters.md create mode 100644 docs/models/classifiertrainingparametersin.md create mode 100644 docs/models/completiondetailedjobout.md create mode 100644 docs/models/completiondetailedjoboutintegrations.md create mode 100644 docs/models/completiondetailedjoboutjobtype.md create mode 100644 docs/models/completiondetailedjoboutobject.md rename docs/models/{detailedjoboutrepositories.md => completiondetailedjoboutrepositories.md} (76%) create mode 100644 docs/models/completiondetailedjoboutstatus.md create mode 100644 docs/models/completionftmodelout.md create mode 100644 docs/models/completionftmodeloutobject.md rename docs/models/{jobout.md => completionjobout.md} (98%) rename docs/models/{trainingparameters.md => completiontrainingparameters.md} (97%) rename docs/models/{trainingparametersin.md => completiontrainingparametersin.md} (99%) create mode 100644 docs/models/finetuneablemodeltype.md create mode 100644 docs/models/ftclassifierlossfunction.md delete mode 100644 docs/models/ftmodelout.md create mode 100644 docs/models/hyperparameters.md create mode 100644 docs/models/instructrequest.md create mode 100644 
docs/models/instructrequestmessages.md create mode 100644 docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md create mode 100644 docs/models/jobsoutdata.md create mode 100644 docs/models/jobtype.md create mode 100644 docs/models/modeltype.md create mode 100644 docs/models/moderationobject.md create mode 100644 docs/models/moderationresponse.md create mode 100644 docs/models/response1.md create mode 100644 src/mistralai/models/chatclassificationrequest.py create mode 100644 src/mistralai/models/chatclassificationrequestinputs.py create mode 100644 src/mistralai/models/classificationtargetresult.py create mode 100644 src/mistralai/models/classifierdetailedjobout.py create mode 100644 src/mistralai/models/classifierftmodelout.py create mode 100644 src/mistralai/models/classifierjobout.py create mode 100644 src/mistralai/models/classifiertargetin.py create mode 100644 src/mistralai/models/classifiertargetout.py create mode 100644 src/mistralai/models/classifiertrainingparameters.py create mode 100644 src/mistralai/models/classifiertrainingparametersin.py rename src/mistralai/models/{detailedjobout.py => completiondetailedjobout.py} (69%) rename src/mistralai/models/{ftmodelout.py => completionftmodelout.py} (81%) rename src/mistralai/models/{jobout.py => completionjobout.py} (89%) rename src/mistralai/models/{trainingparameters.py => completiontrainingparameters.py} (95%) rename src/mistralai/models/{trainingparametersin.py => completiontrainingparametersin.py} (97%) create mode 100644 src/mistralai/models/finetuneablemodeltype.py create mode 100644 src/mistralai/models/ftclassifierlossfunction.py create mode 100644 src/mistralai/models/instructrequest.py rename src/mistralai/models/{classificationobject.py 
=> moderationobject.py} (65%) create mode 100644 src/mistralai/models/moderationresponse.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index fc748eb0..8f91267d 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 292a97e4dd465554d420c92d78d70c5f + docChecksum: 2c14111bcd5cc5624575faefc3e5ddae docVersion: 0.0.2 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.6.0 - configChecksum: 1cfb4e3f53a140213b7b400e79811fe5 + releaseVersion: 1.7.0 + configChecksum: d52ab0a71ab9e0798da08262c59bf31d repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -68,6 +68,8 @@ generatedFiles: - docs/models/batchjobsout.md - docs/models/batchjobsoutobject.md - docs/models/batchjobstatus.md + - docs/models/chatclassificationrequest.md + - docs/models/chatclassificationrequestinputs.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionrequest.md - docs/models/chatcompletionrequesttoolchoice.md @@ -79,14 +81,42 @@ generatedFiles: - docs/models/chatmoderationrequest.md - docs/models/chatmoderationrequestinputs.md - docs/models/checkpointout.md - - docs/models/classificationobject.md - docs/models/classificationrequest.md - docs/models/classificationrequestinputs.md - docs/models/classificationresponse.md + - docs/models/classificationtargetresult.md + - docs/models/classifierdetailedjobout.md + - docs/models/classifierdetailedjoboutintegrations.md + - docs/models/classifierdetailedjoboutjobtype.md + - docs/models/classifierdetailedjoboutobject.md + - docs/models/classifierdetailedjoboutstatus.md + - docs/models/classifierftmodelout.md + - docs/models/classifierftmodeloutmodeltype.md + - docs/models/classifierftmodeloutobject.md + - docs/models/classifierjobout.md + - docs/models/classifierjoboutintegrations.md + - 
docs/models/classifierjoboutjobtype.md + - docs/models/classifierjoboutobject.md + - docs/models/classifierjoboutstatus.md + - docs/models/classifiertargetin.md + - docs/models/classifiertargetout.md + - docs/models/classifiertrainingparameters.md + - docs/models/classifiertrainingparametersin.md - docs/models/completionchunk.md + - docs/models/completiondetailedjobout.md + - docs/models/completiondetailedjoboutintegrations.md + - docs/models/completiondetailedjoboutjobtype.md + - docs/models/completiondetailedjoboutobject.md + - docs/models/completiondetailedjoboutrepositories.md + - docs/models/completiondetailedjoboutstatus.md - docs/models/completionevent.md + - docs/models/completionftmodelout.md + - docs/models/completionftmodeloutobject.md + - docs/models/completionjobout.md - docs/models/completionresponsestreamchoice.md - docs/models/completionresponsestreamchoicefinishreason.md + - docs/models/completiontrainingparameters.md + - docs/models/completiontrainingparametersin.md - docs/models/content.md - docs/models/contentchunk.md - docs/models/data.md @@ -94,11 +124,6 @@ generatedFiles: - docs/models/deletemodelout.md - docs/models/deletemodelv1modelsmodeliddeleterequest.md - docs/models/deltamessage.md - - docs/models/detailedjobout.md - - docs/models/detailedjoboutintegrations.md - - docs/models/detailedjoboutobject.md - - docs/models/detailedjoboutrepositories.md - - docs/models/detailedjoboutstatus.md - docs/models/document.md - docs/models/documenturlchunk.md - docs/models/documenturlchunktype.md @@ -121,12 +146,12 @@ generatedFiles: - docs/models/fimcompletionresponse.md - docs/models/fimcompletionstreamrequest.md - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finetuneablemodeltype.md - docs/models/finishreason.md + - docs/models/ftclassifierlossfunction.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md - docs/models/ftmodelcardtype.md - - docs/models/ftmodelout.md - - docs/models/ftmodeloutobject.md - 
docs/models/function.md - docs/models/functioncall.md - docs/models/functionname.md @@ -135,30 +160,38 @@ generatedFiles: - docs/models/githubrepositoryout.md - docs/models/githubrepositoryouttype.md - docs/models/httpvalidationerror.md + - docs/models/hyperparameters.md - docs/models/imageurl.md - docs/models/imageurlchunk.md - docs/models/imageurlchunkimageurl.md - docs/models/imageurlchunktype.md - docs/models/inputs.md + - docs/models/instructrequest.md + - docs/models/instructrequestmessages.md - docs/models/integrations.md - docs/models/jobin.md - docs/models/jobinintegrations.md - docs/models/jobinrepositories.md - docs/models/jobmetadataout.md - - docs/models/jobout.md - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md - docs/models/jobsapiroutesbatchgetbatchjobrequest.md - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md - docs/models/jobsout.md + - docs/models/jobsoutdata.md - docs/models/jobsoutobject.md + - docs/models/jobtype.md - docs/models/jsonschema.md - docs/models/legacyjobmetadataout.md - docs/models/legacyjobmetadataoutobject.md @@ -168,6 +201,9 @@ generatedFiles: - docs/models/metricout.md - docs/models/modelcapabilities.md - 
docs/models/modellist.md + - docs/models/modeltype.md + - docs/models/moderationobject.md + - docs/models/moderationresponse.md - docs/models/object.md - docs/models/ocrimageobject.md - docs/models/ocrpagedimensions.md @@ -181,6 +217,7 @@ generatedFiles: - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/repositories.md + - docs/models/response1.md - docs/models/responseformat.md - docs/models/responseformats.md - docs/models/retrievefileout.md @@ -205,8 +242,6 @@ generatedFiles: - docs/models/toolmessagerole.md - docs/models/tooltypes.md - docs/models/trainingfile.md - - docs/models/trainingparameters.md - - docs/models/trainingparametersin.md - docs/models/two.md - docs/models/type.md - docs/models/unarchiveftmodelout.md @@ -270,24 +305,37 @@ generatedFiles: - src/mistralai/models/batchjobout.py - src/mistralai/models/batchjobsout.py - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/chatclassificationrequest.py + - src/mistralai/models/chatclassificationrequestinputs.py - src/mistralai/models/chatcompletionchoice.py - src/mistralai/models/chatcompletionrequest.py - src/mistralai/models/chatcompletionresponse.py - src/mistralai/models/chatcompletionstreamrequest.py - src/mistralai/models/chatmoderationrequest.py - src/mistralai/models/checkpointout.py - - src/mistralai/models/classificationobject.py - src/mistralai/models/classificationrequest.py - src/mistralai/models/classificationresponse.py + - src/mistralai/models/classificationtargetresult.py + - src/mistralai/models/classifierdetailedjobout.py + - src/mistralai/models/classifierftmodelout.py + - src/mistralai/models/classifierjobout.py + - src/mistralai/models/classifiertargetin.py + - src/mistralai/models/classifiertargetout.py + - src/mistralai/models/classifiertrainingparameters.py + - src/mistralai/models/classifiertrainingparametersin.py - src/mistralai/models/completionchunk.py + - src/mistralai/models/completiondetailedjobout.py - 
src/mistralai/models/completionevent.py + - src/mistralai/models/completionftmodelout.py + - src/mistralai/models/completionjobout.py - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/completiontrainingparameters.py + - src/mistralai/models/completiontrainingparametersin.py - src/mistralai/models/contentchunk.py - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py - src/mistralai/models/deletefileout.py - src/mistralai/models/deletemodelout.py - src/mistralai/models/deltamessage.py - - src/mistralai/models/detailedjobout.py - src/mistralai/models/documenturlchunk.py - src/mistralai/models/embeddingrequest.py - src/mistralai/models/embeddingresponse.py @@ -305,9 +353,10 @@ generatedFiles: - src/mistralai/models/fimcompletionrequest.py - src/mistralai/models/fimcompletionresponse.py - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/finetuneablemodeltype.py + - src/mistralai/models/ftclassifierlossfunction.py - src/mistralai/models/ftmodelcapabilitiesout.py - src/mistralai/models/ftmodelcard.py - - src/mistralai/models/ftmodelout.py - src/mistralai/models/function.py - src/mistralai/models/functioncall.py - src/mistralai/models/functionname.py @@ -316,9 +365,9 @@ generatedFiles: - src/mistralai/models/httpvalidationerror.py - src/mistralai/models/imageurl.py - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/instructrequest.py - src/mistralai/models/jobin.py - src/mistralai/models/jobmetadataout.py - - src/mistralai/models/jobout.py - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -337,6 +386,8 @@ generatedFiles: - src/mistralai/models/metricout.py - src/mistralai/models/modelcapabilities.py - src/mistralai/models/modellist.py + - src/mistralai/models/moderationobject.py + - src/mistralai/models/moderationresponse.py - 
src/mistralai/models/ocrimageobject.py - src/mistralai/models/ocrpagedimensions.py - src/mistralai/models/ocrpageobject.py @@ -362,8 +413,6 @@ generatedFiles: - src/mistralai/models/toolmessage.py - src/mistralai/models/tooltypes.py - src/mistralai/models/trainingfile.py - - src/mistralai/models/trainingparameters.py - - src/mistralai/models/trainingparametersin.py - src/mistralai/models/unarchiveftmodelout.py - src/mistralai/models/updateftmodelin.py - src/mistralai/models/uploadfileout.py @@ -429,7 +478,7 @@ examples: application/json: {} responses: "200": - application/json: {"id": "", "created": 857478, "owned_by": "", "root": "", "archived": false, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false}, "max_context_length": 32768, "job": "5fa7f0e7-432c-4e47-acb6-0cc78135ddeb"} + application/json: {"id": "", "object": "model", "created": 597129, "owned_by": "", "root": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "fa7f0e74-32ce-447c-9b60-cc78135ddeb8", "model_type": "completion"} jobs_api_routes_fine_tuning_archive_fine_tuned_model: "": parameters: @@ -437,7 +486,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "archived": true} + application/json: {"id": "", "object": "model", "archived": true} jobs_api_routes_fine_tuning_unarchive_fine_tuned_model: "": parameters: @@ -445,7 +494,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "archived": false} + application/json: {"id": "", "object": "model", "archived": false} files_api_routes_upload_file: speakeasy-default-files-api-routes-upload-file: requestBody: @@ -509,7 +558,7 @@ examples: jobs_api_routes_fine_tuning_create_fine_tuning_job: 
speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: - application/json: {"model": "Fiesta", "hyperparameters": {"learning_rate": 0.0001}} + application/json: {"model": "Fiesta", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} responses: "200": application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} @@ -520,7 +569,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} + application/json: {"id": "888f7743-e7c1-4351-b8c6-b985523c4bcb", "auto_start": true, "model": "2", "status": "CANCELLATION_REQUESTED", "created_at": 444836, "modified_at": 424256, "training_files": [], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}, "checkpoints": [{"metrics": {}, "step_number": 550563, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: @@ -528,7 +577,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, 
"modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} + application/json: {"id": "f7135029-2331-4c6e-bbdc-570b7edb4966", "auto_start": true, "model": "A4", "status": "CANCELLATION_REQUESTED", "created_at": 703131, "modified_at": 929437, "training_files": ["e3e32613-5744-4d82-8f3f-d6b3c11eb45e"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "checkpoints": [{"metrics": {}, "step_number": 466651, "created_at": 1716963433}], "classifier_targets": [{"name": "", "labels": ["", "", ""], "weight": 687.66, "loss_function": "single_class"}, {"name": "", "labels": ["", "", ""], "weight": 8470.22, "loss_function": "multi_class"}]} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: @@ -536,7 +585,7 @@ examples: job_id: "0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc" responses: "200": - application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} + application/json: {"id": "bf0f9e6c-3e5d-461a-ac80-e36dcac0dfc8", "auto_start": true, "model": "Explorer", "status": "RUNNING", "created_at": 961967, "modified_at": 914446, "training_files": ["82c4783e-31ec-471d-bbed-4c90a1b0dd73"], "object": "job", "job_type": 
"classifier", "hyperparameters": {"learning_rate": 0.0001}, "checkpoints": [{"metrics": {}, "step_number": 590686, "created_at": 1716963433}], "classifier_targets": [{"name": "", "labels": [""], "weight": 5494.15, "loss_function": "single_class"}, {"name": "", "labels": ["", ""], "weight": 7945.15, "loss_function": "single_class"}]} jobs_api_routes_batch_get_batch_jobs: speakeasy-default-jobs-api-routes-batch-get-batch-jobs: parameters: @@ -636,7 +685,7 @@ examples: application/json: {"model": "V90", "input": [""]} responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "V90", "results": [{}]} "422": application/json: {} moderations_chat_v1_chat_moderations_post: @@ -660,10 +709,28 @@ examples: chat_moderations_v1_chat_moderations_post: speakeasy-default-chat-moderations-v1-chat-moderations-post: requestBody: - application/json: {"model": "Model Y", "input": [[{"content": [], "role": "system"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}], [{"content": "", "role": "system"}, {"content": [{"image_url": "https://fatherly-colon.name", "type": "image_url"}], "role": "user"}, {"content": "", "role": "user"}]], "truncate_for_context_length": false} + application/json: {"input": [[{"content": [], "role": "system"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}], [{"content": "", "role": "system"}, {"content": [{"image_url": "https://fatherly-colon.name", "type": "image_url"}], "role": "user"}, {"content": "", "role": "user"}]], "model": "Model Y"} responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Model Y", "results": [{}, {}]} + "422": + application/json: {} 
+ classifications_v1_classifications_post: + speakeasy-default-classifications-v1-classifications-post: + requestBody: + application/json: {"model": "Altima", "input": ""} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Camaro", "results": [{"key": {"scores": {"key": 6063.42, "key1": 1739.44}}, "key1": {"scores": {}}}, {"key": {"scores": {"key": 2625.67}}, "key1": {"scores": {}}}]} + "422": + application/json: {} + chat_classifications_v1_chat_classifications_post: + speakeasy-default-chat-classifications-v1-chat-classifications-post: + requestBody: + application/json: {"model": "Fortwo", "inputs": [{"messages": [{"prefix": false, "role": "assistant"}, {"prefix": false, "role": "assistant"}]}]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "CX-9", "results": [{"key": {"scores": {"key": 4386.53, "key1": 2974.85}}, "key1": {"scores": {"key": 7100.52, "key1": 480.47}}}]} "422": application/json: {} examplesVersion: 1.0.0 diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 3bc90dff..4bf0297c 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.6.0 + version: 1.7.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index b4c6af57..1b29132d 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:15e39124d61c30c69260e298a909e60996ac6e8623c202d1745b88fc3e67cb2f - sourceBlobDigest: sha256:d16b98efd9214ceb1c89beedc40e67dd09349d5122076f6e16d1a552ee5b3e63 + sourceRevisionDigest: sha256:2bf3d26638f594c87cbc903f32b1d5c101d01bca4b92a63bb8ce3dd9c3bf49e6 + sourceBlobDigest: sha256:f395b0bda941385b5f2782ffba1261bfad5730f4975dcb6ff71592ae34662c25 tags: - latest - - 
speakeasy-sdk-regen-1742466858 + - speakeasy-sdk-regen-1744819913 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:15e39124d61c30c69260e298a909e60996ac6e8623c202d1745b88fc3e67cb2f - sourceBlobDigest: sha256:d16b98efd9214ceb1c89beedc40e67dd09349d5122076f6e16d1a552ee5b3e63 + sourceRevisionDigest: sha256:2bf3d26638f594c87cbc903f32b1d5c101d01bca4b92a63bb8ce3dd9c3bf49e6 + sourceBlobDigest: sha256:f395b0bda941385b5f2782ffba1261bfad5730f4975dcb6ff71592ae34662c25 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:4de7cac024939b19dfba3601531280e278d2d8188dc063827694bda3992666fc + codeSamplesRevisionDigest: sha256:4d37afd772178799966e64c0a4b19b48e689ce1e235a8902be7eed5ffe8dad58 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/README.md b/README.md index 6bfeae52..d5b265b6 100644 --- a/README.md +++ b/README.md @@ -441,6 +441,8 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [moderate](docs/sdks/classifiers/README.md#moderate) - Moderations * [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Chat Moderations +* [classify](docs/sdks/classifiers/README.md#classify) - Classifications +* [classify_chat](docs/sdks/classifiers/README.md#classify_chat) - Chat Classifications ### [embeddings](docs/sdks/embeddings/README.md) diff --git a/RELEASES.md b/RELEASES.md index 4827ac51..629e92d9 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -188,4 +188,14 @@ Based on: ### Generated - [python v1.6.0] . ### Releases -- [PyPI v1.6.0] https://pypi.org/project/mistralai/1.6.0 - . \ No newline at end of file +- [PyPI v1.6.0] https://pypi.org/project/mistralai/1.6.0 - . + +## 2025-04-16 18:35:19 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.0] . 
+### Releases +- [PyPI v1.7.0] https://pypi.org/project/mistralai/1.7.0 - . \ No newline at end of file diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md new file mode 100644 index 00000000..53a05601 --- /dev/null +++ b/docs/models/chatclassificationrequest.md @@ -0,0 +1,9 @@ +# ChatClassificationRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ChatClassificationRequestInputs](../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file diff --git a/docs/models/chatclassificationrequestinputs.md b/docs/models/chatclassificationrequestinputs.md new file mode 100644 index 00000000..18214f0b --- /dev/null +++ b/docs/models/chatclassificationrequestinputs.md @@ -0,0 +1,19 @@ +# ChatClassificationRequestInputs + +Chat to classify + + +## Supported Types + +### `models.InstructRequest` + +```python +value: models.InstructRequest = /* values here */ +``` + +### `List[models.InstructRequest]` + +```python +value: List[models.InstructRequest] = /* values here */ +``` + diff --git a/docs/models/chatmoderationrequest.md b/docs/models/chatmoderationrequest.md index 2b8f46cb..69b6c1dc 100644 --- a/docs/models/chatmoderationrequest.md +++ b/docs/models/chatmoderationrequest.md @@ -5,6 +5,5 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `model` | *str* | :heavy_check_mark: | N/A | | `inputs` | [models.ChatModerationRequestInputs](../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `truncate_for_context_length` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classificationobject.md b/docs/models/classificationobject.md deleted file mode 100644 index 68f2e2b2..00000000 --- a/docs/models/classificationobject.md +++ /dev/null @@ -1,9 +0,0 @@ -# ClassificationObject - - -## Fields - -| Field | Type | Required | Description | -| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | -| `categories` | Dict[str, *bool*] | :heavy_minus_sign: | Classifier result thresholded | -| `category_scores` | Dict[str, *float*] | :heavy_minus_sign: | Classifier result | \ No newline at end of file diff --git a/docs/models/classificationresponse.md b/docs/models/classificationresponse.md index 4765ff62..d1633ae7 100644 --- a/docs/models/classificationresponse.md +++ b/docs/models/classificationresponse.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | -| `model` | *Optional[str]* | :heavy_minus_sign: | N/A | | -| `results` | 
List[[models.ClassificationObject](../models/classificationobject.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | +| `model` | *str* | :heavy_check_mark: | N/A | | +| `results` | List[Dict[str, [models.ClassificationTargetResult](../models/classificationtargetresult.md)]] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/classificationtargetresult.md b/docs/models/classificationtargetresult.md new file mode 100644 index 00000000..f3b10727 --- /dev/null +++ b/docs/models/classificationtargetresult.md @@ -0,0 +1,8 @@ +# ClassificationTargetResult + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `scores` | Dict[str, *float*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/detailedjobout.md b/docs/models/classifierdetailedjobout.md similarity index 92% rename from docs/models/detailedjobout.md rename to docs/models/classifierdetailedjobout.md index f7470327..99227c01 100644 --- a/docs/models/detailedjobout.md +++ b/docs/models/classifierdetailedjobout.md @@ -1,4 +1,4 @@ -# DetailedJobOut +# ClassifierDetailedJobOut ## Fields @@ -7,20 +7,20 @@ | ---------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | -| `status` | [models.DetailedJobOutStatus](../models/detailedjoboutstatus.md) | :heavy_check_mark: | N/A | -| `job_type` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.ClassifierDetailedJobOutStatus](../models/classifierdetailedjoboutstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.DetailedJobOutObject]](../models/detailedjoboutobject.md) | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.ClassifierDetailedJobOutObject]](../models/classifierdetailedjoboutobject.md) | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.DetailedJobOutIntegrations](../models/detailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | +| 
`integrations` | List[[models.ClassifierDetailedJobOutIntegrations](../models/classifierdetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `repositories` | List[[models.DetailedJobOutRepositories](../models/detailedjoboutrepositories.md)] | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `job_type` | [Optional[models.ClassifierDetailedJobOutJobType]](../models/classifierdetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | | `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | | `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierdetailedjoboutintegrations.md b/docs/models/classifierdetailedjoboutintegrations.md new file mode 100644 index 00000000..5a09465e --- /dev/null +++ b/docs/models/classifierdetailedjoboutintegrations.md @@ -0,0 +1,11 @@ +# ClassifierDetailedJobOutIntegrations + + +## Supported Types + +### `models.WandbIntegrationOut` + +```python +value: models.WandbIntegrationOut = /* values here */ +``` + diff --git a/docs/models/classifierdetailedjoboutjobtype.md b/docs/models/classifierdetailedjoboutjobtype.md new file mode 100644 index 00000000..0d1c6573 --- /dev/null +++ b/docs/models/classifierdetailedjoboutjobtype.md @@ -0,0 +1,8 @@ +# ClassifierDetailedJobOutJobType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/detailedjoboutobject.md b/docs/models/classifierdetailedjoboutobject.md similarity index 66% rename from docs/models/detailedjoboutobject.md rename to 
docs/models/classifierdetailedjoboutobject.md index 3731b1f6..08cbcffc 100644 --- a/docs/models/detailedjoboutobject.md +++ b/docs/models/classifierdetailedjoboutobject.md @@ -1,4 +1,4 @@ -# DetailedJobOutObject +# ClassifierDetailedJobOutObject ## Values diff --git a/docs/models/detailedjoboutstatus.md b/docs/models/classifierdetailedjoboutstatus.md similarity index 95% rename from docs/models/detailedjoboutstatus.md rename to docs/models/classifierdetailedjoboutstatus.md index 955d5a26..c3118aaf 100644 --- a/docs/models/detailedjoboutstatus.md +++ b/docs/models/classifierdetailedjoboutstatus.md @@ -1,4 +1,4 @@ -# DetailedJobOutStatus +# ClassifierDetailedJobOutStatus ## Values diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md new file mode 100644 index 00000000..406102cf --- /dev/null +++ b/docs/models/classifierftmodelout.md @@ -0,0 +1,21 @@ +# ClassifierFTModelOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | 
:heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `model_type` | [Optional[models.ClassifierFTModelOutModelType]](../models/classifierftmodeloutmodeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierftmodeloutmodeltype.md b/docs/models/classifierftmodeloutmodeltype.md new file mode 100644 index 00000000..e1e7e465 --- /dev/null +++ b/docs/models/classifierftmodeloutmodeltype.md @@ -0,0 +1,8 @@ +# ClassifierFTModelOutModelType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/ftmodeloutobject.md b/docs/models/classifierftmodeloutobject.md similarity index 72% rename from docs/models/ftmodeloutobject.md rename to docs/models/classifierftmodeloutobject.md index e12b214e..9fe05bcf 100644 --- a/docs/models/ftmodeloutobject.md +++ b/docs/models/classifierftmodeloutobject.md @@ -1,4 +1,4 @@ -# FTModelOutObject +# ClassifierFTModelOutObject ## Values diff --git a/docs/models/classifierjobout.md b/docs/models/classifierjobout.md new file mode 100644 index 00000000..5fa290c1 --- /dev/null +++ b/docs/models/classifierjobout.md @@ -0,0 +1,23 @@ +# ClassifierJobOut + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the job. | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `status` | [models.ClassifierJobOutStatus](../models/classifierjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | +| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | +| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | +| `object` | [Optional[models.ClassifierJobOutObject]](../models/classifierjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | +| `integrations` | List[[models.ClassifierJobOutIntegrations](../models/classifierjoboutintegrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `job_type` | [Optional[models.ClassifierJobOutJobType]](../models/classifierjoboutjobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | \ No newline at end of file diff --git a/docs/models/detailedjoboutintegrations.md b/docs/models/classifierjoboutintegrations.md similarity index 80% rename from docs/models/detailedjoboutintegrations.md rename to docs/models/classifierjoboutintegrations.md index 46beabc1..d938d0b9 100644 --- a/docs/models/detailedjoboutintegrations.md +++ b/docs/models/classifierjoboutintegrations.md @@ -1,4 +1,4 @@ -# DetailedJobOutIntegrations +# ClassifierJobOutIntegrations ## Supported Types diff --git a/docs/models/classifierjoboutjobtype.md b/docs/models/classifierjoboutjobtype.md new file mode 100644 index 00000000..7f5236fa --- /dev/null +++ b/docs/models/classifierjoboutjobtype.md @@ -0,0 +1,10 @@ +# ClassifierJobOutJobType + +The type of job (`FT` for fine-tuning). + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/classifierjoboutobject.md b/docs/models/classifierjoboutobject.md new file mode 100644 index 00000000..1b42d547 --- /dev/null +++ b/docs/models/classifierjoboutobject.md @@ -0,0 +1,10 @@ +# ClassifierJobOutObject + +The object type of the fine-tuning job. 
+ + +## Values + +| Name | Value | +| ----- | ----- | +| `JOB` | job | \ No newline at end of file diff --git a/docs/models/classifierjoboutstatus.md b/docs/models/classifierjoboutstatus.md new file mode 100644 index 00000000..4520f164 --- /dev/null +++ b/docs/models/classifierjoboutstatus.md @@ -0,0 +1,19 @@ +# ClassifierJobOutStatus + +The current status of the fine-tuning job. + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/classifiertargetin.md b/docs/models/classifiertargetin.md new file mode 100644 index 00000000..78cab67b --- /dev/null +++ b/docs/models/classifiertargetin.md @@ -0,0 +1,11 @@ +# ClassifierTargetIn + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `labels` | List[*str*] | :heavy_check_mark: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `loss_function` | [OptionalNullable[models.FTClassifierLossFunction]](../models/ftclassifierlossfunction.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertargetout.md b/docs/models/classifiertargetout.md new file mode 100644 index 00000000..57535ae5 --- /dev/null +++ b/docs/models/classifiertargetout.md @@ -0,0 
+1,11 @@ +# ClassifierTargetOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `labels` | List[*str*] | :heavy_check_mark: | N/A | +| `weight` | *float* | :heavy_check_mark: | N/A | +| `loss_function` | [models.FTClassifierLossFunction](../models/ftclassifierlossfunction.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertrainingparameters.md b/docs/models/classifiertrainingparameters.md new file mode 100644 index 00000000..3b6f3be6 --- /dev/null +++ b/docs/models/classifiertrainingparameters.md @@ -0,0 +1,13 @@ +# ClassifierTrainingParameters + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertrainingparametersin.md b/docs/models/classifiertrainingparametersin.md new file mode 100644 index 00000000..1287c973 --- /dev/null +++ b/docs/models/classifiertrainingparametersin.md @@ -0,0 +1,15 @@ +# ClassifierTrainingParametersIn + +The fine-tuning hyperparameter settings used in a classifier fine-tune job. 
+ + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. 
| +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. | +| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | +| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completiondetailedjobout.md new file mode 100644 index 00000000..b42dd419 --- /dev/null +++ b/docs/models/completiondetailedjobout.md @@ -0,0 +1,26 @@ +# CompletionDetailedJobOut + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | 
+| `id` | *str* | :heavy_check_mark: | N/A | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `status` | [models.CompletionDetailedJobOutStatus](../models/completiondetailedjoboutstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `modified_at` | *int* | :heavy_check_mark: | N/A | +| `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.CompletionDetailedJobOutObject]](../models/completiondetailedjoboutobject.md) | :heavy_minus_sign: | N/A | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.CompletionDetailedJobOutIntegrations](../models/completiondetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `job_type` | [Optional[models.CompletionDetailedJobOutJobType]](../models/completiondetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.CompletionDetailedJobOutRepositories](../models/completiondetailedjoboutrepositories.md)] | :heavy_minus_sign: | N/A | +| `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| +| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutintegrations.md b/docs/models/completiondetailedjoboutintegrations.md new file mode 100644 index 00000000..af6bbcc5 --- /dev/null +++ b/docs/models/completiondetailedjoboutintegrations.md @@ -0,0 +1,11 @@ +# CompletionDetailedJobOutIntegrations + + +## Supported Types + +### `models.WandbIntegrationOut` + +```python +value: models.WandbIntegrationOut = /* values here */ +``` + diff --git a/docs/models/completiondetailedjoboutjobtype.md b/docs/models/completiondetailedjoboutjobtype.md new file mode 100644 index 00000000..fb24db0c --- /dev/null +++ b/docs/models/completiondetailedjoboutjobtype.md @@ -0,0 +1,8 @@ +# CompletionDetailedJobOutJobType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutobject.md b/docs/models/completiondetailedjoboutobject.md new file mode 100644 index 00000000..1bec88e5 --- /dev/null +++ b/docs/models/completiondetailedjoboutobject.md @@ -0,0 +1,8 @@ +# CompletionDetailedJobOutObject + + +## Values + +| Name | Value | +| ----- | ----- | +| `JOB` | job | \ No newline at end of file diff --git a/docs/models/detailedjoboutrepositories.md b/docs/models/completiondetailedjoboutrepositories.md similarity index 76% rename from docs/models/detailedjoboutrepositories.md rename to docs/models/completiondetailedjoboutrepositories.md index 4b32079a..4f9727c3 100644 --- a/docs/models/detailedjoboutrepositories.md +++ b/docs/models/completiondetailedjoboutrepositories.md @@ -1,4 +1,4 @@ -# DetailedJobOutRepositories +# CompletionDetailedJobOutRepositories ## Supported Types diff --git a/docs/models/completiondetailedjoboutstatus.md b/docs/models/completiondetailedjoboutstatus.md new file mode 100644 index 00000000..b80525ba --- /dev/null +++ 
b/docs/models/completiondetailedjoboutstatus.md @@ -0,0 +1,17 @@ +# CompletionDetailedJobOutStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md new file mode 100644 index 00000000..ca1c5289 --- /dev/null +++ b/docs/models/completionftmodelout.md @@ -0,0 +1,20 @@ +# CompletionFTModelOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.CompletionFTModelOutObject]](../models/completionftmodeloutobject.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `model_type` | 
[Optional[models.ModelType]](../models/modeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionftmodeloutobject.md b/docs/models/completionftmodeloutobject.md new file mode 100644 index 00000000..6f9d858c --- /dev/null +++ b/docs/models/completionftmodeloutobject.md @@ -0,0 +1,8 @@ +# CompletionFTModelOutObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/jobout.md b/docs/models/completionjobout.md similarity index 98% rename from docs/models/jobout.md rename to docs/models/completionjobout.md index 652c9d16..381aeb94 100644 --- a/docs/models/jobout.md +++ b/docs/models/completionjobout.md @@ -1,4 +1,4 @@ -# JobOut +# CompletionJobOut ## Fields @@ -7,18 +7,18 @@ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | The ID of the job. 
| | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | | `status` | [models.Status](../models/status.md) | :heavy_check_mark: | The current status of the fine-tuning job. | -| `job_type` | *str* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | | `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | | `integrations` | List[[models.Integrations](../models/integrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. 
| -| `repositories` | List[[models.Repositories](../models/repositories.md)] | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `job_type` | [Optional[models.JobType]](../models/jobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | +| `repositories` | List[[models.Repositories](../models/repositories.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparameters.md b/docs/models/completiontrainingparameters.md similarity index 97% rename from docs/models/trainingparameters.md rename to docs/models/completiontrainingparameters.md index e56df8e9..4746a95d 100644 --- a/docs/models/trainingparameters.md +++ b/docs/models/completiontrainingparameters.md @@ -1,4 +1,4 @@ -# TrainingParameters +# CompletionTrainingParameters ## Fields @@ -10,5 +10,5 @@ | `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparametersin.md b/docs/models/completiontrainingparametersin.md similarity index 99% rename from docs/models/trainingparametersin.md rename to docs/models/completiontrainingparametersin.md index 64c31a44..9fcc714e 100644 --- a/docs/models/trainingparametersin.md +++ b/docs/models/completiontrainingparametersin.md @@ -1,4 +1,4 @@ -# 
TrainingParametersIn +# CompletionTrainingParametersIn The fine-tuning hyperparameter settings used in a fine-tune job. @@ -12,5 +12,5 @@ The fine-tuning hyperparameter settings used in a fine-tune job. | `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | | `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/finetuneablemodeltype.md b/docs/models/finetuneablemodeltype.md new file mode 100644 index 00000000..34b24bd4 --- /dev/null +++ b/docs/models/finetuneablemodeltype.md @@ -0,0 +1,9 @@ +# FineTuneableModelType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `COMPLETION` | completion | +| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/ftclassifierlossfunction.md b/docs/models/ftclassifierlossfunction.md new file mode 100644 index 00000000..919cdd38 --- /dev/null +++ b/docs/models/ftclassifierlossfunction.md @@ -0,0 +1,9 @@ +# FTClassifierLossFunction + + 
+## Values + +| Name | Value | +| -------------- | -------------- | +| `SINGLE_CLASS` | single_class | +| `MULTI_CLASS` | multi_class | \ No newline at end of file diff --git a/docs/models/ftmodelcapabilitiesout.md b/docs/models/ftmodelcapabilitiesout.md index 3cb52377..19690476 100644 --- a/docs/models/ftmodelcapabilitiesout.md +++ b/docs/models/ftmodelcapabilitiesout.md @@ -8,4 +8,5 @@ | `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelout.md b/docs/models/ftmodelout.md deleted file mode 100644 index 6dec7156..00000000 --- a/docs/models/ftmodelout.md +++ /dev/null @@ -1,19 +0,0 @@ -# FTModelOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.FTModelOutObject]](../models/ftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | 
*OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/hyperparameters.md b/docs/models/hyperparameters.md new file mode 100644 index 00000000..46a6dd6b --- /dev/null +++ b/docs/models/hyperparameters.md @@ -0,0 +1,17 @@ +# Hyperparameters + + +## Supported Types + +### `models.CompletionTrainingParametersIn` + +```python +value: models.CompletionTrainingParametersIn = /* values here */ +``` + +### `models.ClassifierTrainingParametersIn` + +```python +value: models.ClassifierTrainingParametersIn = /* values here */ +``` + diff --git a/docs/models/instructrequest.md b/docs/models/instructrequest.md new file mode 100644 index 00000000..9500cb58 --- /dev/null +++ b/docs/models/instructrequest.md @@ -0,0 +1,8 @@ +# InstructRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `messages` | List[[models.InstructRequestMessages](../models/instructrequestmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/instructrequestmessages.md b/docs/models/instructrequestmessages.md new file mode 100644 index 00000000..9c866a7d --- /dev/null +++ b/docs/models/instructrequestmessages.md @@ -0,0 +1,29 @@ +# InstructRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ 
+``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/jobin.md b/docs/models/jobin.md index 6fd661cf..d6cbd27a 100644 --- a/docs/models/jobin.md +++ b/docs/models/jobin.md @@ -6,10 +6,13 @@ | Field | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | -| `hyperparameters` | [models.TrainingParametersIn](../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. 
| +| `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | | `integrations` | List[[models.JobInIntegrations](../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | +| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | | `repositories` | List[[models.JobInRepositories](../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | -| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| \ No newline at end of file +| `classifier_targets` | List[[models.ClassifierTargetIn](../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md index 93de090e..f2a3bb78 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md +++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `status` | [OptionalNullable[models.BatchJobStatus]](../models/batchjobstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | List[[models.BatchJobStatus](../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md new file mode 100644 index 00000000..1b331662 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningCancelFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierDetailedJobOut` + +```python +value: models.ClassifierDetailedJobOut = /* values here */ +``` + +### `models.CompletionDetailedJobOut` + +```python +value: models.CompletionDetailedJobOut = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md index dd12c71c..eeddc3cd 100644 --- a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md +++ b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -5,10 +5,10 @@ OK ## Supported Types -### `models.JobOut` +### `models.Response1` ```python -value: models.JobOut = /* values here */ +value: models.Response1 = /* values here */ ``` ### `models.LegacyJobMetadataOut` diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md new file mode 100644 index 00000000..e0d2e361 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierDetailedJobOut` + +```python +value: models.ClassifierDetailedJobOut = /* values here */ +``` + +### `models.CompletionDetailedJobOut` + +```python 
+value: models.CompletionDetailedJobOut = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md index 9d25d79c..3dca3cd8 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md @@ -9,6 +9,7 @@ | `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | | `status` | [OptionalNullable[models.QueryParamStatus]](../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | | `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. 
| diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md new file mode 100644 index 00000000..64f4cca6 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningStartFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierDetailedJobOut` + +```python +value: models.ClassifierDetailedJobOut = /* values here */ +``` + +### `models.CompletionDetailedJobOut` + +```python +value: models.CompletionDetailedJobOut = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md new file mode 100644 index 00000000..54f4c398 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningUpdateFineTunedModelResponse + +OK + + +## Supported Types + +### `models.ClassifierFTModelOut` + +```python +value: models.ClassifierFTModelOut = /* values here */ +``` + +### `models.CompletionFTModelOut` + +```python +value: models.CompletionFTModelOut = /* values here */ +``` + diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md index 99ff75ec..d71793ef 100644 --- a/docs/models/jobsout.md +++ b/docs/models/jobsout.md @@ -6,5 +6,5 @@ | Field | Type | Required | Description | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | | `total` | *int* | :heavy_check_mark: | N/A | -| `data` | List[[models.JobOut](../models/jobout.md)] | :heavy_minus_sign: | N/A | +| `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A | | `object` | [Optional[models.JobsOutObject]](../models/jobsoutobject.md) | 
:heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsoutdata.md b/docs/models/jobsoutdata.md new file mode 100644 index 00000000..28cec311 --- /dev/null +++ b/docs/models/jobsoutdata.md @@ -0,0 +1,17 @@ +# JobsOutData + + +## Supported Types + +### `models.ClassifierJobOut` + +```python +value: models.ClassifierJobOut = /* values here */ +``` + +### `models.CompletionJobOut` + +```python +value: models.CompletionJobOut = /* values here */ +``` + diff --git a/docs/models/jobtype.md b/docs/models/jobtype.md new file mode 100644 index 00000000..847c6622 --- /dev/null +++ b/docs/models/jobtype.md @@ -0,0 +1,10 @@ +# JobType + +The type of job (`FT` for fine-tuning). + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/modeltype.md b/docs/models/modeltype.md new file mode 100644 index 00000000..a31c3ca0 --- /dev/null +++ b/docs/models/modeltype.md @@ -0,0 +1,8 @@ +# ModelType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/moderationobject.md b/docs/models/moderationobject.md new file mode 100644 index 00000000..320b2ab4 --- /dev/null +++ b/docs/models/moderationobject.md @@ -0,0 +1,9 @@ +# ModerationObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `categories` | Dict[str, *bool*] | :heavy_minus_sign: | Moderation result thresholds | +| `category_scores` | Dict[str, *float*] | :heavy_minus_sign: | Moderation result | \ No newline at end of file diff --git a/docs/models/moderationresponse.md b/docs/models/moderationresponse.md new file mode 100644 index 00000000..75a5eec7 --- /dev/null +++ b/docs/models/moderationresponse.md @@ -0,0 +1,10 @@ +# ModerationResponse + + +## Fields + +| Field | Type 
| Required | Description | Example | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | +| `model` | *str* | :heavy_check_mark: | N/A | | +| `results` | List[[models.ModerationObject](../models/moderationobject.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/response1.md b/docs/models/response1.md new file mode 100644 index 00000000..2e73fdbb --- /dev/null +++ b/docs/models/response1.md @@ -0,0 +1,17 @@ +# Response1 + + +## Supported Types + +### `models.ClassifierJobOut` + +```python +value: models.ClassifierJobOut = /* values here */ +``` + +### `models.CompletionJobOut` + +```python +value: models.CompletionJobOut = /* values here */ +``` + diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md index b9a3a86d..f924b636 100644 --- a/docs/models/wandbintegrationout.md +++ b/docs/models/wandbintegrationout.md @@ -8,4 +8,5 @@ | `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | | `type` | [Optional[models.WandbIntegrationOutType]](../models/wandbintegrationouttype.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. 
| -| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index cbe409bb..12d472e0 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -9,6 +9,8 @@ Classifiers API. * [moderate](#moderate) - Moderations * [moderate_chat](#moderate_chat) - Chat Moderations +* [classify](#classify) - Classifications +* [classify_chat](#classify_chat) - Chat Classifications ## moderate @@ -44,7 +46,7 @@ with Mistral( ### Response -**[models.ClassificationResponse](../../models/classificationresponse.md)** +**[models.ModerationResponse](../../models/moderationresponse.md)** ### Errors @@ -68,7 +70,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate_chat(model="Model Y", inputs=[ + res = mistral.classifiers.moderate_chat(inputs=[ [ { "content": [ @@ -114,7 +116,7 @@ with Mistral( "role": "user", }, ], - ]) + ], model="Model Y") # Handle response print(res) @@ -125,13 +127,107 @@ with Mistral( | Parameter | Type | Required | Description | | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | | `inputs` | [models.ChatModerationRequestInputs](../../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `truncate_for_context_length` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | | `retries` | 
[Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response +**[models.ModerationResponse](../../models/moderationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## classify + +Classifications + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.classifiers.classify(model="Altima", inputs="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ClassificationResponse](../../models/classificationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## classify_chat + +Chat Classifications + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.classifiers.classify_chat(model="Fortwo", inputs=[ + { + "messages": [ + { + "prefix": False, + "role": "assistant", + }, + { + "prefix": False, + "role": "assistant", + }, + ], + }, + ]) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ChatClassificationRequestInputs](../../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + **[models.ClassificationResponse](../../models/classificationresponse.md)** ### Errors diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index ecb11def..75d3b57d 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -41,6 +41,7 @@ with Mistral( | `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. 
| | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | | `status` | [OptionalNullable[models.QueryParamStatus]](../../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | | `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. 
| @@ -73,7 +74,9 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={}) + res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={ + "learning_rate": 0.0001, + }) # Handle response print(res) @@ -85,13 +88,16 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| -| `hyperparameters` | [models.TrainingParametersIn](../../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. | +| `hyperparameters` | [models.Hyperparameters](../../models/hyperparameters.md) | :heavy_check_mark: | N/A | | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | | `integrations` | List[[models.JobInIntegrations](../../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | -| `repositories` | List[[models.JobInRepositories](../../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| +| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.JobInRepositories](../../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetIn](../../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -135,7 +141,7 @@ with Mistral( ### Response -**[models.DetailedJobOut](../../models/detailedjobout.md)** +**[models.JobsAPIRoutesFineTuningGetFineTuningJobResponse](../../models/jobsapiroutesfinetuninggetfinetuningjobresponse.md)** ### Errors @@ -174,7 +180,7 @@ with Mistral( ### Response -**[models.DetailedJobOut](../../models/detailedjobout.md)** +**[models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse](../../models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md)** ### Errors @@ -213,7 +219,7 @@ with Mistral( ### Response -**[models.DetailedJobOut](../../models/detailedjobout.md)** +**[models.JobsAPIRoutesFineTuningStartFineTuningJobResponse](../../models/jobsapiroutesfinetuningstartfinetuningjobresponse.md)** ### Errors diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 5b80a45b..e2dac8b4 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -34,16 +34,16 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | -| `page` | 
*Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `status` | [OptionalNullable[models.BatchJobStatus]](../../models/batchjobstatus.md) | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | List[[models.BatchJobStatus](../../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index dd7baf50..d7a5ed85 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -166,7 +166,7 @@ with Mistral( ### Response -**[models.FTModelOut](../../models/ftmodelout.md)** +**[models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse](../../models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md)** ### Errors diff --git a/pyproject.toml b/pyproject.toml index 6307fc75..2da8b5ea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.6.0" +version = "1.7.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 9507529b..7b151c78 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.6.0" +__version__: str = "1.7.0" __openapi_doc_version__: str = "0.0.2" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.548.6 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index d85961f3..91be11c1 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -23,7 +23,7 @@ def moderate( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: + ) -> models.ModerationResponse: r"""Moderations :param model: ID of the model to use. 
@@ -91,7 +91,7 @@ def moderate( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return utils.unmarshal_json(http_res.text, models.ModerationResponse) if utils.match_response(http_res, "422", "application/json"): response_data = utils.unmarshal_json( http_res.text, models.HTTPValidationErrorData @@ -129,7 +129,7 @@ async def moderate_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: + ) -> models.ModerationResponse: r"""Moderations :param model: ID of the model to use. @@ -197,7 +197,7 @@ async def moderate_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return utils.unmarshal_json(http_res.text, models.ModerationResponse) if utils.match_response(http_res, "422", "application/json"): response_data = utils.unmarshal_json( http_res.text, models.HTTPValidationErrorData @@ -226,22 +226,20 @@ async def moderate_async( def moderate_chat( self, *, - model: str, inputs: Union[ models.ChatModerationRequestInputs, models.ChatModerationRequestInputsTypedDict, ], - truncate_for_context_length: Optional[bool] = False, + model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: + ) -> models.ModerationResponse: r"""Chat Moderations - :param model: :param inputs: Chat to classify - :param truncate_for_context_length: + :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds 
@@ -258,9 +256,8 @@ def moderate_chat( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - model=model, inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), - truncate_for_context_length=truncate_for_context_length, + model=model, ) req = self._build_request( @@ -306,7 +303,7 @@ def moderate_chat( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return utils.unmarshal_json(http_res.text, models.ModerationResponse) if utils.match_response(http_res, "422", "application/json"): response_data = utils.unmarshal_json( http_res.text, models.HTTPValidationErrorData @@ -335,22 +332,20 @@ def moderate_chat( async def moderate_chat_async( self, *, - model: str, inputs: Union[ models.ChatModerationRequestInputs, models.ChatModerationRequestInputsTypedDict, ], - truncate_for_context_length: Optional[bool] = False, + model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: + ) -> models.ModerationResponse: r"""Chat Moderations - :param model: :param inputs: Chat to classify - :param truncate_for_context_length: + :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -367,9 +362,8 @@ async def moderate_chat_async( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - model=model, inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), - truncate_for_context_length=truncate_for_context_length, + model=model, ) req = self._build_request_async( @@ -413,6 +407,434 @@ async def 
moderate_chat_async( retry_config=retry_config, ) + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ModerationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def classify( + self, + *, + model: str, + inputs: Union[ + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Classifications + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + inputs=inputs, + ) + + req = self._build_request( + method="POST", + path="/v1/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="classifications_v1_classifications_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error 
occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def classify_async( + self, + *, + model: str, + inputs: Union[ + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Classifications + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + inputs=inputs, + ) + + req = self._build_request_async( + method="POST", + path="/v1/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="classifications_v1_classifications_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def classify_chat( + self, + *, + model: str, + inputs: Union[ + models.ChatClassificationRequestInputs, + models.ChatClassificationRequestInputsTypedDict, + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Chat Classifications + + :param model: + :param inputs: Chat to classify + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatClassificationRequest( + model=model, + inputs=utils.get_pydantic_model( + inputs, models.ChatClassificationRequestInputs + ), + ) + + req = self._build_request( + method="POST", + path="/v1/chat/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="chat_classifications_v1_chat_classifications_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, 
"4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def classify_chat_async( + self, + *, + model: str, + inputs: Union[ + models.ChatClassificationRequestInputs, + models.ChatClassificationRequestInputsTypedDict, + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Chat Classifications + + :param model: + :param inputs: Chat to classify + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatClassificationRequest( + model=model, + inputs=utils.get_pydantic_model( + inputs, models.ChatClassificationRequestInputs + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="chat_classifications_v1_chat_classifications_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 675ece0b..76d9f41a 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -17,6 +17,7 @@ def list( page_size: Optional[int] = 100, model: OptionalNullable[str] = 
UNSET, created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[models.QueryParamStatus] = UNSET, wandb_project: OptionalNullable[str] = UNSET, @@ -35,6 +36,7 @@ def list( :param page_size: The number of items to return per page. :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. :param status: The current job state to filter on. When set, the other results are not displayed. :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. @@ -60,6 +62,7 @@ def list( page_size=page_size, model=model, created_after=created_after, + created_before=created_before, created_by_me=created_by_me, status=status, wandb_project=wandb_project, @@ -134,6 +137,7 @@ async def list_async( page_size: Optional[int] = 100, model: OptionalNullable[str] = UNSET, created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[models.QueryParamStatus] = UNSET, wandb_project: OptionalNullable[str] = UNSET, @@ -152,6 +156,7 @@ async def list_async( :param page_size: The number of items to return per page. :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. 
:param status: The current job state to filter on. When set, the other results are not displayed. :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. @@ -177,6 +182,7 @@ async def list_async( page_size=page_size, model=model, created_after=created_after, + created_before=created_before, created_by_me=created_by_me, status=status, wandb_project=wandb_project, @@ -248,9 +254,7 @@ def create( self, *, model: str, - hyperparameters: Union[ - models.TrainingParametersIn, models.TrainingParametersInTypedDict - ], + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], training_files: Optional[ Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] ] = None, @@ -261,12 +265,20 @@ def create( List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] ] ] = UNSET, - repositories: Optional[ + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + repositories: OptionalNullable[ Union[ List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] ] - ] = None, - auto_start: Optional[bool] = None, + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[ + List[models.ClassifierTargetIn], + List[models.ClassifierTargetInTypedDict], + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -277,13 +289,16 @@ def create( Create a new fine-tuning job, it will be queued for processing. :param model: The name of the model to fine-tune. - :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. + :param hyperparameters: :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. 
If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` :param integrations: A list of integrations to enable for your fine-tuning job. - :param repositories: :param auto_start: This field will be required in a future release. + :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -305,17 +320,22 @@ def create( training_files, Optional[List[models.TrainingFile]] ), validation_files=validation_files, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.TrainingParametersIn - ), suffix=suffix, integrations=utils.get_pydantic_model( integrations, OptionalNullable[List[models.JobInIntegrations]] ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), repositories=utils.get_pydantic_model( - repositories, Optional[List[models.JobInRepositories]] + repositories, OptionalNullable[List[models.JobInRepositories]] + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] ), - auto_start=auto_start, ) req = self._build_request( @@ -387,9 +407,7 @@ async def create_async( self, *, model: str, - hyperparameters: Union[ - 
models.TrainingParametersIn, models.TrainingParametersInTypedDict - ], + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], training_files: Optional[ Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] ] = None, @@ -400,12 +418,20 @@ async def create_async( List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] ] ] = UNSET, - repositories: Optional[ + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + repositories: OptionalNullable[ Union[ List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] ] - ] = None, - auto_start: Optional[bool] = None, + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[ + List[models.ClassifierTargetIn], + List[models.ClassifierTargetInTypedDict], + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -416,13 +442,16 @@ async def create_async( Create a new fine-tuning job, it will be queued for processing. :param model: The name of the model to fine-tune. - :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. + :param hyperparameters: :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` :param integrations: A list of integrations to enable for your fine-tuning job. 
- :param repositories: :param auto_start: This field will be required in a future release. + :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -444,17 +473,22 @@ async def create_async( training_files, Optional[List[models.TrainingFile]] ), validation_files=validation_files, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.TrainingParametersIn - ), suffix=suffix, integrations=utils.get_pydantic_model( integrations, OptionalNullable[List[models.JobInIntegrations]] ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), repositories=utils.get_pydantic_model( - repositories, Optional[List[models.JobInRepositories]] + repositories, OptionalNullable[List[models.JobInRepositories]] + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] ), - auto_start=auto_start, ) req = self._build_request_async( @@ -530,7 +564,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. 
@@ -594,7 +628,9 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -623,7 +659,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. @@ -687,7 +723,9 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -716,7 +754,7 @@ def cancel( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
@@ -780,7 +818,9 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -809,7 +849,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. @@ -873,7 +913,9 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -902,7 +944,7 @@ def start( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
@@ -966,7 +1008,9 @@ def start( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -995,7 +1039,7 @@ async def start_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. @@ -1059,7 +1103,9 @@ async def start_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index e0d3c616..32a40aa7 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -19,7 +19,7 @@ def list( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[models.BatchJobStatus] = UNSET, + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -130,7 +130,7 @@ async def list_async( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[models.BatchJobStatus] = UNSET, + status: 
OptionalNullable[List[models.BatchJobStatus]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 0750906a..fa756356 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -39,6 +39,14 @@ from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict from .batchjobstatus import BatchJobStatus +from .chatclassificationrequest import ( + ChatClassificationRequest, + ChatClassificationRequestTypedDict, +) +from .chatclassificationrequestinputs import ( + ChatClassificationRequestInputs, + ChatClassificationRequestInputsTypedDict, +) from .chatcompletionchoice import ( ChatCompletionChoice, ChatCompletionChoiceTypedDict, @@ -79,7 +87,6 @@ TwoTypedDict, ) from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .classificationobject import ClassificationObject, ClassificationObjectTypedDict from .classificationrequest import ( ClassificationRequest, ClassificationRequestInputs, @@ -90,13 +97,87 @@ ClassificationResponse, ClassificationResponseTypedDict, ) +from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, +) +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutIntegrations, + ClassifierDetailedJobOutIntegrationsTypedDict, + ClassifierDetailedJobOutJobType, + ClassifierDetailedJobOutObject, + ClassifierDetailedJobOutStatus, + ClassifierDetailedJobOutTypedDict, +) +from .classifierftmodelout import ( + ClassifierFTModelOut, + ClassifierFTModelOutModelType, + ClassifierFTModelOutObject, + ClassifierFTModelOutTypedDict, +) +from .classifierjobout import ( + ClassifierJobOut, + ClassifierJobOutIntegrations, + ClassifierJobOutIntegrationsTypedDict, + 
ClassifierJobOutJobType, + ClassifierJobOutObject, + ClassifierJobOutStatus, + ClassifierJobOutTypedDict, +) +from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .classifiertrainingparametersin import ( + ClassifierTrainingParametersIn, + ClassifierTrainingParametersInTypedDict, +) from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutIntegrations, + CompletionDetailedJobOutIntegrationsTypedDict, + CompletionDetailedJobOutJobType, + CompletionDetailedJobOutObject, + CompletionDetailedJobOutRepositories, + CompletionDetailedJobOutRepositoriesTypedDict, + CompletionDetailedJobOutStatus, + CompletionDetailedJobOutTypedDict, +) from .completionevent import CompletionEvent, CompletionEventTypedDict +from .completionftmodelout import ( + CompletionFTModelOut, + CompletionFTModelOutObject, + CompletionFTModelOutTypedDict, + ModelType, +) +from .completionjobout import ( + CompletionJobOut, + CompletionJobOutTypedDict, + Integrations, + IntegrationsTypedDict, + JobType, + Object, + Repositories, + RepositoriesTypedDict, + Status, +) from .completionresponsestreamchoice import ( CompletionResponseStreamChoice, CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, ) +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .completiontrainingparametersin import ( + CompletionTrainingParametersIn, + CompletionTrainingParametersInTypedDict, +) from .contentchunk import ContentChunk, ContentChunkTypedDict from .delete_model_v1_models_model_id_deleteop import ( DeleteModelV1ModelsModelIDDeleteRequest, @@ -105,16 +186,6 @@ from 
.deletefileout import DeleteFileOut, DeleteFileOutTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .detailedjobout import ( - DetailedJobOut, - DetailedJobOutIntegrations, - DetailedJobOutIntegrationsTypedDict, - DetailedJobOutObject, - DetailedJobOutRepositories, - DetailedJobOutRepositoriesTypedDict, - DetailedJobOutStatus, - DetailedJobOutTypedDict, -) from .documenturlchunk import ( DocumentURLChunk, DocumentURLChunkType, @@ -171,12 +242,13 @@ FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict, ) +from .finetuneablemodeltype import FineTuneableModelType +from .ftclassifierlossfunction import FTClassifierLossFunction from .ftmodelcapabilitiesout import ( FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, ) from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict -from .ftmodelout import FTModelOut, FTModelOutObject, FTModelOutTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( Arguments, @@ -204,7 +276,15 @@ ImageURLChunkType, ImageURLChunkTypedDict, ) +from .instructrequest import ( + InstructRequest, + InstructRequestMessages, + InstructRequestMessagesTypedDict, + InstructRequestTypedDict, +) from .jobin import ( + Hyperparameters, + HyperparametersTypedDict, JobIn, JobInIntegrations, JobInIntegrationsTypedDict, @@ -213,16 +293,6 @@ JobInTypedDict, ) from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .jobout import ( - Integrations, - IntegrationsTypedDict, - JobOut, - JobOutTypedDict, - Object, - Repositories, - RepositoriesTypedDict, - Status, -) from .jobs_api_routes_batch_cancel_batch_jobop import ( JobsAPIRoutesBatchCancelBatchJobRequest, JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, @@ -242,14 +312,20 @@ from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( JobsAPIRoutesFineTuningCancelFineTuningJobRequest, 
JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningCancelFineTuningJobResponse, + JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, ) from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( JobsAPIRoutesFineTuningCreateFineTuningJobResponse, JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, + Response1, + Response1TypedDict, ) from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( JobsAPIRoutesFineTuningGetFineTuningJobRequest, JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobResponse, + JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, ) from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( JobsAPIRoutesFineTuningGetFineTuningJobsRequest, @@ -259,6 +335,8 @@ from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( JobsAPIRoutesFineTuningStartFineTuningJobRequest, JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningStartFineTuningJobResponse, + JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, ) from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, @@ -267,8 +345,16 @@ from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, +) +from .jobsout import ( + JobsOut, + JobsOutData, + JobsOutDataTypedDict, + JobsOutObject, + JobsOutTypedDict, ) -from .jobsout import JobsOut, JobsOutObject, JobsOutTypedDict from .jsonschema import JSONSchema, JSONSchemaTypedDict from .legacyjobmetadataout import ( LegacyJobMetadataOut, @@ -279,6 +365,8 @@ from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict 
from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict +from .moderationobject import ModerationObject, ModerationObjectTypedDict +from .moderationresponse import ModerationResponse, ModerationResponseTypedDict from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict @@ -321,8 +409,6 @@ ) from .tooltypes import ToolTypes from .trainingfile import TrainingFile, TrainingFileTypedDict -from .trainingparameters import TrainingParameters, TrainingParametersTypedDict -from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict from .unarchiveftmodelout import ( UnarchiveFTModelOut, UnarchiveFTModelOutObject, @@ -397,6 +483,10 @@ "BatchJobsOut", "BatchJobsOutObject", "BatchJobsOutTypedDict", + "ChatClassificationRequest", + "ChatClassificationRequestInputs", + "ChatClassificationRequestInputsTypedDict", + "ChatClassificationRequestTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", @@ -419,21 +509,65 @@ "ChatModerationRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", - "ClassificationObject", - "ClassificationObjectTypedDict", "ClassificationRequest", "ClassificationRequestInputs", "ClassificationRequestInputsTypedDict", "ClassificationRequestTypedDict", "ClassificationResponse", "ClassificationResponseTypedDict", + "ClassificationTargetResult", + "ClassificationTargetResultTypedDict", + "ClassifierDetailedJobOut", + "ClassifierDetailedJobOutIntegrations", + "ClassifierDetailedJobOutIntegrationsTypedDict", + "ClassifierDetailedJobOutJobType", + "ClassifierDetailedJobOutObject", + "ClassifierDetailedJobOutStatus", + "ClassifierDetailedJobOutTypedDict", + "ClassifierFTModelOut", + "ClassifierFTModelOutModelType", + "ClassifierFTModelOutObject", + "ClassifierFTModelOutTypedDict", + "ClassifierJobOut", + 
"ClassifierJobOutIntegrations", + "ClassifierJobOutIntegrationsTypedDict", + "ClassifierJobOutJobType", + "ClassifierJobOutObject", + "ClassifierJobOutStatus", + "ClassifierJobOutTypedDict", + "ClassifierTargetIn", + "ClassifierTargetInTypedDict", + "ClassifierTargetOut", + "ClassifierTargetOutTypedDict", + "ClassifierTrainingParameters", + "ClassifierTrainingParametersIn", + "ClassifierTrainingParametersInTypedDict", + "ClassifierTrainingParametersTypedDict", "CompletionChunk", "CompletionChunkTypedDict", + "CompletionDetailedJobOut", + "CompletionDetailedJobOutIntegrations", + "CompletionDetailedJobOutIntegrationsTypedDict", + "CompletionDetailedJobOutJobType", + "CompletionDetailedJobOutObject", + "CompletionDetailedJobOutRepositories", + "CompletionDetailedJobOutRepositoriesTypedDict", + "CompletionDetailedJobOutStatus", + "CompletionDetailedJobOutTypedDict", "CompletionEvent", "CompletionEventTypedDict", + "CompletionFTModelOut", + "CompletionFTModelOutObject", + "CompletionFTModelOutTypedDict", + "CompletionJobOut", + "CompletionJobOutTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", + "CompletionTrainingParameters", + "CompletionTrainingParametersIn", + "CompletionTrainingParametersInTypedDict", + "CompletionTrainingParametersTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", @@ -448,14 +582,6 @@ "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", - "DetailedJobOut", - "DetailedJobOutIntegrations", - "DetailedJobOutIntegrationsTypedDict", - "DetailedJobOutObject", - "DetailedJobOutRepositories", - "DetailedJobOutRepositoriesTypedDict", - "DetailedJobOutStatus", - "DetailedJobOutTypedDict", "Document", "DocumentTypedDict", "DocumentURLChunk", @@ -479,14 +605,12 @@ "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", + "FTClassifierLossFunction", 
"FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", "FTModelCardType", "FTModelCardTypedDict", - "FTModelOut", - "FTModelOutObject", - "FTModelOutTypedDict", "File", "FilePurpose", "FileSchema", @@ -506,6 +630,7 @@ "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", + "FineTuneableModelType", "FinishReason", "Function", "FunctionCall", @@ -521,6 +646,8 @@ "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "Hyperparameters", + "HyperparametersTypedDict", "ImageURL", "ImageURLChunk", "ImageURLChunkImageURL", @@ -530,6 +657,10 @@ "ImageURLTypedDict", "Inputs", "InputsTypedDict", + "InstructRequest", + "InstructRequestMessages", + "InstructRequestMessagesTypedDict", + "InstructRequestTypedDict", "Integrations", "IntegrationsTypedDict", "JSONSchema", @@ -542,8 +673,7 @@ "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", - "JobOut", - "JobOutTypedDict", + "JobType", "JobsAPIRoutesBatchCancelBatchJobRequest", "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", "JobsAPIRoutesBatchGetBatchJobRequest", @@ -554,19 +684,29 @@ "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", 
"JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", "JobsOut", + "JobsOutData", + "JobsOutDataTypedDict", "JobsOutObject", "JobsOutTypedDict", "LegacyJobMetadataOut", @@ -584,6 +724,11 @@ "ModelCapabilitiesTypedDict", "ModelList", "ModelListTypedDict", + "ModelType", + "ModerationObject", + "ModerationObjectTypedDict", + "ModerationResponse", + "ModerationResponseTypedDict", "OCRImageObject", "OCRImageObjectTypedDict", "OCRPageDimensions", @@ -607,6 +752,8 @@ "ReferenceChunkTypedDict", "Repositories", "RepositoriesTypedDict", + "Response1", + "Response1TypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", @@ -647,10 +794,6 @@ "ToolTypes", "TrainingFile", "TrainingFileTypedDict", - "TrainingParameters", - "TrainingParametersIn", - "TrainingParametersInTypedDict", - "TrainingParametersTypedDict", "Two", "TwoTypedDict", "Type", diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py index e78e98c4..cff27c4e 100644 --- a/src/mistralai/models/archiveftmodelout.py +++ b/src/mistralai/models/archiveftmodelout.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict ArchiveFTModelOutObject = Literal["model"] @@ -14,18 
+11,13 @@ class ArchiveFTModelOutTypedDict(TypedDict): id: str - object: ArchiveFTModelOutObject + object: NotRequired[ArchiveFTModelOutObject] archived: NotRequired[bool] class ArchiveFTModelOut(BaseModel): id: str - OBJECT: Annotated[ - Annotated[ - Optional[ArchiveFTModelOutObject], AfterValidator(validate_const("model")) - ], - pydantic.Field(alias="object"), - ] = "model" + object: Optional[ArchiveFTModelOutObject] = "model" archived: Optional[bool] = True diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py index 677284f2..bf873f41 100644 --- a/src/mistralai/models/batchjobout.py +++ b/src/mistralai/models/batchjobout.py @@ -4,12 +4,9 @@ from .batcherror import BatchError, BatchErrorTypedDict from .batchjobstatus import BatchJobStatus from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict BatchJobOutObject = Literal["batch"] @@ -27,7 +24,7 @@ class BatchJobOutTypedDict(TypedDict): completed_requests: int succeeded_requests: int failed_requests: int - object: BatchJobOutObject + object: NotRequired[BatchJobOutObject] metadata: NotRequired[Nullable[Dict[str, Any]]] output_file: NotRequired[Nullable[str]] error_file: NotRequired[Nullable[str]] @@ -58,10 +55,7 @@ class BatchJobOut(BaseModel): failed_requests: int - OBJECT: Annotated[ - Annotated[Optional[BatchJobOutObject], AfterValidator(validate_const("batch"))], - pydantic.Field(alias="object"), - ] = "batch" + object: Optional[BatchJobOutObject] = "batch" metadata: OptionalNullable[Dict[str, Any]] = UNSET diff --git a/src/mistralai/models/batchjobsout.py b/src/mistralai/models/batchjobsout.py index f8c63a33..8ce26f31 
100644 --- a/src/mistralai/models/batchjobsout.py +++ b/src/mistralai/models/batchjobsout.py @@ -3,11 +3,8 @@ from __future__ import annotations from .batchjobout import BatchJobOut, BatchJobOutTypedDict from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict BatchJobsOutObject = Literal["list"] @@ -16,7 +13,7 @@ class BatchJobsOutTypedDict(TypedDict): total: int data: NotRequired[List[BatchJobOutTypedDict]] - object: BatchJobsOutObject + object: NotRequired[BatchJobsOutObject] class BatchJobsOut(BaseModel): @@ -24,7 +21,4 @@ class BatchJobsOut(BaseModel): data: Optional[List[BatchJobOut]] = None - OBJECT: Annotated[ - Annotated[Optional[BatchJobsOutObject], AfterValidator(validate_const("list"))], - pydantic.Field(alias="object"), - ] = "list" + object: Optional[BatchJobsOutObject] = "list" diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py new file mode 100644 index 00000000..b71bd2ea --- /dev/null +++ b/src/mistralai/models/chatclassificationrequest.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatclassificationrequestinputs import ( + ChatClassificationRequestInputs, + ChatClassificationRequestInputsTypedDict, +) +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class ChatClassificationRequestTypedDict(TypedDict): + model: str + inputs: ChatClassificationRequestInputsTypedDict + r"""Chat to classify""" + + +class ChatClassificationRequest(BaseModel): + model: str + + inputs: ChatClassificationRequestInputs + r"""Chat to classify""" diff --git a/src/mistralai/models/chatclassificationrequestinputs.py b/src/mistralai/models/chatclassificationrequestinputs.py new file mode 100644 index 00000000..86a2699a --- /dev/null +++ b/src/mistralai/models/chatclassificationrequestinputs.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .instructrequest import InstructRequest, InstructRequestTypedDict +from typing import List, Union +from typing_extensions import TypeAliasType + + +ChatClassificationRequestInputsTypedDict = TypeAliasType( + "ChatClassificationRequestInputsTypedDict", + Union[InstructRequestTypedDict, List[InstructRequestTypedDict]], +) +r"""Chat to classify""" + + +ChatClassificationRequestInputs = TypeAliasType( + "ChatClassificationRequestInputs", Union[InstructRequest, List[InstructRequest]] +) +r"""Chat to classify""" diff --git a/src/mistralai/models/chatmoderationrequest.py b/src/mistralai/models/chatmoderationrequest.py index 5b25b877..2f58d52f 100644 --- a/src/mistralai/models/chatmoderationrequest.py +++ b/src/mistralai/models/chatmoderationrequest.py @@ -9,8 +9,8 @@ from mistralai.utils import get_discriminator import pydantic from pydantic import Discriminator, Tag -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict +from typing import List, Union +from typing_extensions import Annotated, 
TypeAliasType, TypedDict TwoTypedDict = TypeAliasType( @@ -71,16 +71,13 @@ class ChatModerationRequestTypedDict(TypedDict): - model: str inputs: ChatModerationRequestInputsTypedDict r"""Chat to classify""" - truncate_for_context_length: NotRequired[bool] + model: str class ChatModerationRequest(BaseModel): - model: str - inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] r"""Chat to classify""" - truncate_for_context_length: Optional[bool] = False + model: str diff --git a/src/mistralai/models/classificationresponse.py b/src/mistralai/models/classificationresponse.py index 5716db42..b7741f37 100644 --- a/src/mistralai/models/classificationresponse.py +++ b/src/mistralai/models/classificationresponse.py @@ -1,21 +1,24 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .classificationobject import ClassificationObject, ClassificationObjectTypedDict +from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, +) from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Dict, List +from typing_extensions import TypedDict class ClassificationResponseTypedDict(TypedDict): - id: NotRequired[str] - model: NotRequired[str] - results: NotRequired[List[ClassificationObjectTypedDict]] + id: str + model: str + results: List[Dict[str, ClassificationTargetResultTypedDict]] class ClassificationResponse(BaseModel): - id: Optional[str] = None + id: str - model: Optional[str] = None + model: str - results: Optional[List[ClassificationObject]] = None + results: List[Dict[str, ClassificationTargetResult]] diff --git a/src/mistralai/models/classificationtargetresult.py b/src/mistralai/models/classificationtargetresult.py new file mode 100644 index 00000000..60c5a51b --- /dev/null +++ b/src/mistralai/models/classificationtargetresult.py @@ -0,0 +1,14 @@ 
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Dict +from typing_extensions import TypedDict + + +class ClassificationTargetResultTypedDict(TypedDict): + scores: Dict[str, float] + + +class ClassificationTargetResult(BaseModel): + scores: Dict[str, float] diff --git a/src/mistralai/models/classifierdetailedjobout.py b/src/mistralai/models/classifierdetailedjobout.py new file mode 100644 index 00000000..971d529f --- /dev/null +++ b/src/mistralai/models/classifierdetailedjobout.py @@ -0,0 +1,156 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .eventout import EventOut, EventOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierDetailedJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] + +ClassifierDetailedJobOutObject = Literal["job"] + +ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +ClassifierDetailedJobOutIntegrations = WandbIntegrationOut + + +ClassifierDetailedJobOutJobType = Literal["classifier"] + + +class ClassifierDetailedJobOutTypedDict(TypedDict): + id: str + auto_start: bool + model: 
str + r"""The name of the model to fine-tune.""" + status: ClassifierDetailedJobOutStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: ClassifierTrainingParametersTypedDict + classifier_targets: List[ClassifierTargetOutTypedDict] + validation_files: NotRequired[Nullable[List[str]]] + object: NotRequired[ClassifierDetailedJobOutObject] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[ClassifierDetailedJobOutJobType] + events: NotRequired[List[EventOutTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointOutTypedDict]] + + +class ClassifierDetailedJobOut(BaseModel): + id: str + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: ClassifierDetailedJobOutStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: ClassifierTrainingParameters + + classifier_targets: List[ClassifierTargetOut] + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Optional[ClassifierDetailedJobOutObject] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier" + + events: Optional[List[EventOut]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[CheckpointOut]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + "events", + "checkpoints", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py new file mode 100644 index 00000000..846a20a2 --- /dev/null +++ b/src/mistralai/models/classifierftmodelout.py @@ -0,0 +1,101 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, +) +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierFTModelOutObject = Literal["model"] + +ClassifierFTModelOutModelType = Literal["classifier"] + + +class ClassifierFTModelOutTypedDict(TypedDict): + id: str + created: int + owned_by: str + root: str + archived: bool + capabilities: FTModelCapabilitiesOutTypedDict + job: str + classifier_targets: List[ClassifierTargetOutTypedDict] + object: NotRequired[ClassifierFTModelOutObject] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: NotRequired[ClassifierFTModelOutModelType] + + +class ClassifierFTModelOut(BaseModel): + id: str + + created: int + + owned_by: str + + root: str + + archived: bool + + capabilities: FTModelCapabilitiesOut + + job: str + + classifier_targets: List[ClassifierTargetOut] + + object: Optional[ClassifierFTModelOutObject] = "model" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Optional[ClassifierFTModelOutModelType] = "classifier" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "name", + "description", + "max_context_length", + "aliases", + "model_type", + ] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifierjobout.py b/src/mistralai/models/classifierjobout.py new file mode 100644 index 00000000..66011b4a --- /dev/null +++ b/src/mistralai/models/classifierjobout.py @@ -0,0 +1,165 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current status of the fine-tuning job.""" + +ClassifierJobOutObject = Literal["job"] +r"""The object type of the fine-tuning job.""" + +ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +ClassifierJobOutIntegrations = WandbIntegrationOut + + +ClassifierJobOutJobType = Literal["classifier"] +r"""The type of job (`FT` for fine-tuning).""" + + +class ClassifierJobOutTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + r"""The name of the model to fine-tune.""" + status: 
ClassifierJobOutStatus + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: ClassifierTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + object: NotRequired[ClassifierJobOutObject] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[ClassifierJobOutJobType] + r"""The type of job (`FT` for fine-tuning).""" + + +class ClassifierJobOut(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: ClassifierJobOutStatus + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: ClassifierTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Optional[ClassifierJobOutObject] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[ClassifierJobOutJobType] = "classifier" + r"""The type of job (`FT` for fine-tuning).""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifiertargetin.py b/src/mistralai/models/classifiertargetin.py new file mode 100644 index 00000000..c9e4b406 --- /dev/null +++ b/src/mistralai/models/classifiertargetin.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTargetInTypedDict(TypedDict): + name: str + labels: List[str] + weight: NotRequired[float] + loss_function: NotRequired[Nullable[FTClassifierLossFunction]] + + +class ClassifierTargetIn(BaseModel): + name: str + + labels: List[str] + + weight: Optional[float] = 1 + + loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["weight", "loss_function"] + nullable_fields = ["loss_function"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifiertargetout.py b/src/mistralai/models/classifiertargetout.py new file mode 100644 index 00000000..ddc587f4 --- /dev/null +++ b/src/mistralai/models/classifiertargetout.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ClassifierTargetOutTypedDict(TypedDict): + name: str + labels: List[str] + weight: float + loss_function: FTClassifierLossFunction + + +class ClassifierTargetOut(BaseModel): + name: str + + labels: List[str] + + weight: float + + loss_function: FTClassifierLossFunction diff --git a/src/mistralai/models/classifiertrainingparameters.py b/src/mistralai/models/classifiertrainingparameters.py new file mode 100644 index 00000000..f0908e81 --- /dev/null +++ b/src/mistralai/models/classifiertrainingparameters.py @@ -0,0 +1,73 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + 
"warmup_fraction", + "epochs", + "seq_len", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifiertrainingparametersin.py b/src/mistralai/models/classifiertrainingparametersin.py new file mode 100644 index 00000000..f1f16cfb --- /dev/null +++ b/src/mistralai/models/classifiertrainingparametersin.py @@ -0,0 +1,85 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersInTypedDict(TypedDict): + r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" + + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: NotRequired[float] + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: NotRequired[Nullable[float]] + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: NotRequired[Nullable[float]] + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParametersIn(BaseModel): + r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + + learning_rate: Optional[float] = 0.0001 + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + + weight_decay: OptionalNullable[float] = UNSET + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" + + warmup_fraction: OptionalNullable[float] = UNSET + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/completiondetailedjobout.py similarity index 69% rename from src/mistralai/models/detailedjobout.py rename to src/mistralai/models/completiondetailedjobout.py index b2a1c8d9..598a5e20 100644 --- a/src/mistralai/models/detailedjobout.py +++ b/src/mistralai/models/completiondetailedjobout.py @@ -2,21 +2,21 @@ from __future__ import annotations from .checkpointout import CheckpointOut, 
CheckpointOutTypedDict +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) from .eventout import EventOut, EventOutTypedDict from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict -DetailedJobOutStatus = Literal[ +CompletionDetailedJobOutStatus = Literal[ "QUEUED", "STARTED", "VALIDATING", @@ -29,57 +29,57 @@ "CANCELLATION_REQUESTED", ] -DetailedJobOutObject = Literal["job"] +CompletionDetailedJobOutObject = Literal["job"] -DetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict +CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict -DetailedJobOutIntegrations = WandbIntegrationOut +CompletionDetailedJobOutIntegrations = WandbIntegrationOut -DetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict +CompletionDetailedJobOutJobType = Literal["completion"] +CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict -DetailedJobOutRepositories = GithubRepositoryOut +CompletionDetailedJobOutRepositories = GithubRepositoryOut -class DetailedJobOutTypedDict(TypedDict): + +class CompletionDetailedJobOutTypedDict(TypedDict): id: str auto_start: bool - hyperparameters: TrainingParametersTypedDict model: str r"""The name of the model to fine-tune.""" - status: DetailedJobOutStatus - 
job_type: str + status: CompletionDetailedJobOutStatus created_at: int modified_at: int training_files: List[str] + hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] - object: DetailedJobOutObject + object: NotRequired[CompletionDetailedJobOutObject] fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] - integrations: NotRequired[Nullable[List[DetailedJobOutIntegrationsTypedDict]]] + integrations: NotRequired[ + Nullable[List[CompletionDetailedJobOutIntegrationsTypedDict]] + ] trained_tokens: NotRequired[Nullable[int]] - repositories: NotRequired[List[DetailedJobOutRepositoriesTypedDict]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[CompletionDetailedJobOutJobType] + repositories: NotRequired[List[CompletionDetailedJobOutRepositoriesTypedDict]] events: NotRequired[List[EventOutTypedDict]] r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" checkpoints: NotRequired[List[CheckpointOutTypedDict]] -class DetailedJobOut(BaseModel): +class CompletionDetailedJobOut(BaseModel): id: str auto_start: bool - hyperparameters: TrainingParameters - model: str r"""The name of the model to fine-tune.""" - status: DetailedJobOutStatus - - job_type: str + status: CompletionDetailedJobOutStatus created_at: int @@ -87,27 +87,26 @@ class DetailedJobOut(BaseModel): training_files: List[str] + hyperparameters: CompletionTrainingParameters + validation_files: OptionalNullable[List[str]] = UNSET - OBJECT: Annotated[ - Annotated[ - Optional[DetailedJobOutObject], AfterValidator(validate_const("job")) - ], - pydantic.Field(alias="object"), - ] = "job" + object: Optional[CompletionDetailedJobOutObject] = "job" fine_tuned_model: OptionalNullable[str] = UNSET suffix: OptionalNullable[str] = UNSET - integrations: OptionalNullable[List[DetailedJobOutIntegrations]] = UNSET + integrations: 
OptionalNullable[List[CompletionDetailedJobOutIntegrations]] = UNSET trained_tokens: OptionalNullable[int] = UNSET - repositories: Optional[List[DetailedJobOutRepositories]] = None - metadata: OptionalNullable[JobMetadataOut] = UNSET + job_type: Optional[CompletionDetailedJobOutJobType] = "completion" + + repositories: Optional[List[CompletionDetailedJobOutRepositories]] = None + events: Optional[List[EventOut]] = None r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" @@ -122,8 +121,9 @@ def serialize_model(self, handler): "suffix", "integrations", "trained_tokens", - "repositories", "metadata", + "job_type", + "repositories", "events", "checkpoints", ] diff --git a/src/mistralai/models/ftmodelout.py b/src/mistralai/models/completionftmodelout.py similarity index 81% rename from src/mistralai/models/ftmodelout.py rename to src/mistralai/models/completionftmodelout.py index e8d6864c..71ab1a45 100644 --- a/src/mistralai/models/ftmodelout.py +++ b/src/mistralai/models/completionftmodelout.py @@ -6,18 +6,17 @@ FTModelCapabilitiesOutTypedDict, ) from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict -FTModelOutObject = Literal["model"] +CompletionFTModelOutObject = Literal["model"] +ModelType = Literal["completion"] -class FTModelOutTypedDict(TypedDict): + +class CompletionFTModelOutTypedDict(TypedDict): id: str created: int owned_by: str @@ -25,14 +24,15 @@ class FTModelOutTypedDict(TypedDict): archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str - object: FTModelOutObject + object: 
NotRequired[CompletionFTModelOutObject] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] aliases: NotRequired[List[str]] + model_type: NotRequired[ModelType] -class FTModelOut(BaseModel): +class CompletionFTModelOut(BaseModel): id: str created: int @@ -47,10 +47,7 @@ class FTModelOut(BaseModel): job: str - OBJECT: Annotated[ - Annotated[Optional[FTModelOutObject], AfterValidator(validate_const("model"))], - pydantic.Field(alias="object"), - ] = "model" + object: Optional[CompletionFTModelOutObject] = "model" name: OptionalNullable[str] = UNSET @@ -60,6 +57,8 @@ class FTModelOut(BaseModel): aliases: Optional[List[str]] = None + model_type: Optional[ModelType] = "completion" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -68,6 +67,7 @@ def serialize_model(self, handler): "description", "max_context_length", "aliases", + "model_type", ] nullable_fields = ["name", "description"] null_default_fields = [] diff --git a/src/mistralai/models/jobout.py b/src/mistralai/models/completionjobout.py similarity index 89% rename from src/mistralai/models/jobout.py rename to src/mistralai/models/completionjobout.py index c3ffb248..7f8bfd91 100644 --- a/src/mistralai/models/jobout.py +++ b/src/mistralai/models/completionjobout.py @@ -1,17 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict Status = Literal[ @@ -37,32 +37,33 @@ Integrations = WandbIntegrationOut +JobType = Literal["completion"] +r"""The type of job (`FT` for fine-tuning).""" + RepositoriesTypedDict = GithubRepositoryOutTypedDict Repositories = GithubRepositoryOut -class JobOutTypedDict(TypedDict): +class CompletionJobOutTypedDict(TypedDict): id: str r"""The ID of the job.""" auto_start: bool - hyperparameters: TrainingParametersTypedDict model: str r"""The name of the model to fine-tune.""" status: Status r"""The current status of the fine-tuning job.""" - job_type: str - r"""The type of job (`FT` for fine-tuning).""" created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" modified_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" training_files: List[str] r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain validation data.""" - 
object: Object + object: NotRequired[Object] r"""The object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" @@ -72,27 +73,24 @@ class JobOutTypedDict(TypedDict): r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" - repositories: NotRequired[List[RepositoriesTypedDict]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[JobType] + r"""The type of job (`FT` for fine-tuning).""" + repositories: NotRequired[List[RepositoriesTypedDict]] -class JobOut(BaseModel): +class CompletionJobOut(BaseModel): id: str r"""The ID of the job.""" auto_start: bool - hyperparameters: TrainingParameters - model: str r"""The name of the model to fine-tune.""" status: Status r"""The current status of the fine-tuning job.""" - job_type: str - r"""The type of job (`FT` for fine-tuning).""" - created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" @@ -102,13 +100,12 @@ class JobOut(BaseModel): training_files: List[str] r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: CompletionTrainingParameters + validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - OBJECT: Annotated[ - Annotated[Optional[Object], AfterValidator(validate_const("job"))], - pydantic.Field(alias="object"), - ] = "job" + object: Optional[Object] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET @@ -123,10 +120,13 @@ class JobOut(BaseModel): trained_tokens: OptionalNullable[int] = UNSET r"""Total number of tokens trained.""" - repositories: Optional[List[Repositories]] = None - metadata: OptionalNullable[JobMetadataOut] = UNSET + 
job_type: Optional[JobType] = "completion" + r"""The type of job (`FT` for fine-tuning).""" + + repositories: Optional[List[Repositories]] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -136,8 +136,9 @@ def serialize_model(self, handler): "suffix", "integrations", "trained_tokens", - "repositories", "metadata", + "job_type", + "repositories", ] nullable_fields = [ "validation_files", diff --git a/src/mistralai/models/trainingparameters.py b/src/mistralai/models/completiontrainingparameters.py similarity index 95% rename from src/mistralai/models/trainingparameters.py rename to src/mistralai/models/completiontrainingparameters.py index cc2b037a..33b21ec9 100644 --- a/src/mistralai/models/trainingparameters.py +++ b/src/mistralai/models/completiontrainingparameters.py @@ -7,17 +7,17 @@ from typing_extensions import NotRequired, TypedDict -class TrainingParametersTypedDict(TypedDict): +class CompletionTrainingParametersTypedDict(TypedDict): training_steps: NotRequired[Nullable[int]] learning_rate: NotRequired[float] weight_decay: NotRequired[Nullable[float]] warmup_fraction: NotRequired[Nullable[float]] epochs: NotRequired[Nullable[float]] - fim_ratio: NotRequired[Nullable[float]] seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] -class TrainingParameters(BaseModel): +class CompletionTrainingParameters(BaseModel): training_steps: OptionalNullable[int] = UNSET learning_rate: Optional[float] = 0.0001 @@ -28,10 +28,10 @@ class TrainingParameters(BaseModel): epochs: OptionalNullable[float] = UNSET - fim_ratio: OptionalNullable[float] = UNSET - seq_len: OptionalNullable[int] = UNSET + fim_ratio: OptionalNullable[float] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -40,16 +40,16 @@ def serialize_model(self, handler): "weight_decay", "warmup_fraction", "epochs", - "fim_ratio", "seq_len", + "fim_ratio", ] nullable_fields = [ "training_steps", 
"weight_decay", "warmup_fraction", "epochs", - "fim_ratio", "seq_len", + "fim_ratio", ] null_default_fields = [] diff --git a/src/mistralai/models/trainingparametersin.py b/src/mistralai/models/completiontrainingparametersin.py similarity index 97% rename from src/mistralai/models/trainingparametersin.py rename to src/mistralai/models/completiontrainingparametersin.py index 7d2e414b..92f8d99a 100644 --- a/src/mistralai/models/trainingparametersin.py +++ b/src/mistralai/models/completiontrainingparametersin.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -class TrainingParametersInTypedDict(TypedDict): +class CompletionTrainingParametersInTypedDict(TypedDict): r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" training_steps: NotRequired[Nullable[int]] @@ -19,11 +19,11 @@ class TrainingParametersInTypedDict(TypedDict): warmup_fraction: NotRequired[Nullable[float]] r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" epochs: NotRequired[Nullable[float]] - fim_ratio: NotRequired[Nullable[float]] seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] -class TrainingParametersIn(BaseModel): +class CompletionTrainingParametersIn(BaseModel): r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" training_steps: OptionalNullable[int] = UNSET @@ -40,10 +40,10 @@ class TrainingParametersIn(BaseModel): epochs: OptionalNullable[float] = UNSET - fim_ratio: OptionalNullable[float] = UNSET - seq_len: OptionalNullable[int] = UNSET + fim_ratio: OptionalNullable[float] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -52,16 +52,16 @@ def serialize_model(self, handler): "weight_decay", "warmup_fraction", "epochs", - "fim_ratio", "seq_len", + "fim_ratio", ] nullable_fields = [ "training_steps", "weight_decay", "warmup_fraction", "epochs", - "fim_ratio", "seq_len", + "fim_ratio", ] null_default_fields = [] diff --git a/src/mistralai/models/finetuneablemodeltype.py b/src/mistralai/models/finetuneablemodeltype.py new file mode 100644 index 00000000..3507dc91 --- /dev/null +++ b/src/mistralai/models/finetuneablemodeltype.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FineTuneableModelType = Literal["completion", "classifier"] diff --git a/src/mistralai/models/ftclassifierlossfunction.py b/src/mistralai/models/ftclassifierlossfunction.py new file mode 100644 index 00000000..df2d19ff --- /dev/null +++ b/src/mistralai/models/ftclassifierlossfunction.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FTClassifierLossFunction = Literal["single_class", "multi_class"] diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py index b5e1e521..7f3aa18b 100644 --- a/src/mistralai/models/ftmodelcapabilitiesout.py +++ b/src/mistralai/models/ftmodelcapabilitiesout.py @@ -11,6 +11,7 @@ class FTModelCapabilitiesOutTypedDict(TypedDict): completion_fim: NotRequired[bool] function_calling: NotRequired[bool] fine_tuning: NotRequired[bool] + classification: NotRequired[bool] class FTModelCapabilitiesOut(BaseModel): @@ -21,3 +22,5 @@ class FTModelCapabilitiesOut(BaseModel): function_calling: Optional[bool] = False fine_tuning: Optional[bool] = False + + classification: Optional[bool] = False diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index 715db6b7..eda4ee0b 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ b/src/mistralai/models/githubrepositoryin.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict GithubRepositoryInType = Literal["github"] @@ -17,7 +14,7 @@ class GithubRepositoryInTypedDict(TypedDict): name: str owner: str token: str - type: GithubRepositoryInType + type: NotRequired[GithubRepositoryInType] ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -29,12 +26,7 @@ class GithubRepositoryIn(BaseModel): token: str - TYPE: Annotated[ - Annotated[ - Optional[GithubRepositoryInType], AfterValidator(validate_const("github")) - ], - pydantic.Field(alias="type"), - 
] = "github" + type: Optional[GithubRepositoryInType] = "github" ref: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py index 5a0ce31a..72213b6f 100644 --- a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict GithubRepositoryOutType = Literal["github"] @@ -17,7 +14,7 @@ class GithubRepositoryOutTypedDict(TypedDict): name: str owner: str commit_id: str - type: GithubRepositoryOutType + type: NotRequired[GithubRepositoryOutType] ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -29,12 +26,7 @@ class GithubRepositoryOut(BaseModel): commit_id: str - TYPE: Annotated[ - Annotated[ - Optional[GithubRepositoryOutType], AfterValidator(validate_const("github")) - ], - pydantic.Field(alias="type"), - ] = "github" + type: Optional[GithubRepositoryOutType] = "github" ref: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/instructrequest.py b/src/mistralai/models/instructrequest.py new file mode 100644 index 00000000..dddbda00 --- /dev/null +++ b/src/mistralai/models/instructrequest.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestMessagesTypedDict = TypeAliasType( + "InstructRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestTypedDict(TypedDict): + messages: List[InstructRequestMessagesTypedDict] + + +class InstructRequest(BaseModel): + messages: List[InstructRequestMessages] diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py index 0ef66da3..cb535e46 100644 --- a/src/mistralai/models/jobin.py +++ b/src/mistralai/models/jobin.py @@ -1,14 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict +from .classifiertrainingparametersin import ( + ClassifierTrainingParametersIn, + ClassifierTrainingParametersInTypedDict, +) +from .completiontrainingparametersin import ( + CompletionTrainingParametersIn, + CompletionTrainingParametersInTypedDict, +) +from .finetuneablemodeltype import FineTuneableModelType from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict from .trainingfile import TrainingFile, TrainingFileTypedDict -from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict JobInIntegrationsTypedDict = WandbIntegrationTypedDict @@ -17,6 +26,20 @@ JobInIntegrations = WandbIntegration +HyperparametersTypedDict = TypeAliasType( + "HyperparametersTypedDict", + Union[ + ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict + ], +) + + +Hyperparameters = TypeAliasType( + "Hyperparameters", + Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn], +) + + JobInRepositoriesTypedDict = GithubRepositoryInTypedDict @@ -26,8 +49,7 @@ class JobInTypedDict(TypedDict): model: str r"""The name of the model to fine-tune.""" - hyperparameters: TrainingParametersInTypedDict - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + hyperparameters: HyperparametersTypedDict training_files: NotRequired[List[TrainingFileTypedDict]] validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain 
validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" @@ -35,17 +57,19 @@ class JobInTypedDict(TypedDict): r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] r"""A list of integrations to enable for your fine-tuning job.""" - repositories: NotRequired[List[JobInRepositoriesTypedDict]] auto_start: NotRequired[bool] r"""This field will be required in a future release.""" + invalid_sample_skip_percentage: NotRequired[float] + job_type: NotRequired[Nullable[FineTuneableModelType]] + repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]] + classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] class JobIn(BaseModel): model: str r"""The name of the model to fine-tune.""" - hyperparameters: TrainingParametersIn - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + hyperparameters: Hyperparameters training_files: Optional[List[TrainingFile]] = None @@ -58,11 +82,17 @@ class JobIn(BaseModel): integrations: OptionalNullable[List[JobInIntegrations]] = UNSET r"""A list of integrations to enable for your fine-tuning job.""" - repositories: Optional[List[JobInRepositories]] = None - auto_start: Optional[bool] = None r"""This field will be required in a future release.""" + invalid_sample_skip_percentage: Optional[float] = 0 + + job_type: OptionalNullable[FineTuneableModelType] = UNSET + + repositories: OptionalNullable[List[JobInRepositories]] = UNSET + + classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET + @model_serializer(mode="wrap") def 
serialize_model(self, handler): optional_fields = [ @@ -70,10 +100,20 @@ def serialize_model(self, handler): "validation_files", "suffix", "integrations", - "repositories", "auto_start", + "invalid_sample_skip_percentage", + "job_type", + "repositories", + "classifier_targets", + ] + nullable_fields = [ + "validation_files", + "suffix", + "integrations", + "job_type", + "repositories", + "classifier_targets", ] - nullable_fields = ["validation_files", "suffix", "integrations"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py index 8f0c66ca..fa2c6ed3 100644 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -6,7 +6,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, QueryParamMetadata from pydantic import model_serializer -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -17,7 +17,7 @@ class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): metadata: NotRequired[Nullable[Dict[str, Any]]] created_after: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] - status: NotRequired[Nullable[BatchJobStatus]] + status: NotRequired[Nullable[List[BatchJobStatus]]] class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): @@ -52,7 +52,7 @@ class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): ] = False status: Annotated[ - OptionalNullable[BatchJobStatus], + OptionalNullable[List[BatchJobStatus]], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py 
index b72ff42f..ceb19a69 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -1,9 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): @@ -16,3 +26,20 @@ class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] r"""The ID of the job to cancel.""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py index d7a5d10d..39af3ea6 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ 
b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -1,21 +1,38 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .jobout import JobOut, JobOutTypedDict +from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict +from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag from typing import Union -from typing_extensions import TypeAliasType +from typing_extensions import Annotated, TypeAliasType + + +Response1TypedDict = TypeAliasType( + "Response1TypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +) + + +Response1 = Annotated[ + Union[ + Annotated[ClassifierJobOut, Tag("classifier")], + Annotated[CompletionJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - Union[LegacyJobMetadataOutTypedDict, JobOutTypedDict], + Union[LegacyJobMetadataOutTypedDict, Response1TypedDict], ) r"""OK""" JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - Union[LegacyJobMetadataOut, JobOut], + Union[LegacyJobMetadataOut, Response1], ) r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py index 896d34f5..be99dd2d 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -1,9 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): @@ -16,3 +26,20 @@ class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] r"""The ID of the job to analyse.""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index b51b1958..710436c9 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -33,6 +33,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): r"""The model name used for fine-tuning to filter on. 
When set, the other results are not displayed.""" created_after: NotRequired[Nullable[datetime]] r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_before: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" status: NotRequired[Nullable[QueryParamStatus]] @@ -70,6 +71,11 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): ] = UNSET r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_before: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + created_by_me: Annotated[ Optional[bool], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -107,6 +113,7 @@ def serialize_model(self, handler): "page_size", "model", "created_after", + "created_before", "created_by_me", "status", "wandb_project", @@ -116,6 +123,7 @@ def serialize_model(self, handler): nullable_fields = [ "model", "created_after", + "created_before", "status", "wandb_project", "wandb_name", diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py index 3e7989a7..8103b67b 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -1,9 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): @@ -14,3 +24,20 @@ class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + + +JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py index 11e23f8c..a10528ca 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -1,10 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict +from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.utils import ( + FieldMetadata, + PathParamMetadata, + RequestMetadata, + get_discriminator, +) +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): @@ -23,3 +32,20 @@ class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): UpdateFTModelIn, FieldMetadata(request=RequestMetadata(media_type="application/json")), ] + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ + Union[ + Annotated[ClassifierFTModelOut, Tag("classifier")], + Annotated[CompletionFTModelOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "model_type", "model_type")), +] +r"""OK""" diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py index 316bf89f..abdf18fd 100644 --- a/src/mistralai/models/jobsout.py +++ b/src/mistralai/models/jobsout.py @@ -1,13 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .jobout import JobOut, JobOutTypedDict +from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict +from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +JobsOutDataTypedDict = TypeAliasType( + "JobsOutDataTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +) + + +JobsOutData = Annotated[ + Union[ + Annotated[ClassifierJobOut, Tag("classifier")], + Annotated[CompletionJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] JobsOutObject = Literal["list"] @@ -15,16 +29,13 @@ class JobsOutTypedDict(TypedDict): total: int - data: NotRequired[List[JobOutTypedDict]] - object: JobsOutObject + data: NotRequired[List[JobsOutDataTypedDict]] + object: NotRequired[JobsOutObject] class JobsOut(BaseModel): total: int - data: Optional[List[JobOut]] = None + data: Optional[List[JobsOutData]] = None - OBJECT: Annotated[ - Annotated[Optional[JobsOutObject], AfterValidator(validate_const("list"))], - pydantic.Field(alias="object"), - ] = "list" + object: Optional[JobsOutObject] = "list" diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index df6b3d35..1741570e 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, 
UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict LegacyJobMetadataOutObject = Literal["job.metadata"] @@ -33,7 +30,7 @@ class LegacyJobMetadataOutTypedDict(TypedDict): r"""The number of complete passes through the entire training dataset.""" training_steps: NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - object: LegacyJobMetadataOutObject + object: NotRequired[LegacyJobMetadataOutObject] class LegacyJobMetadataOut(BaseModel): @@ -67,13 +64,7 @@ class LegacyJobMetadataOut(BaseModel): training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - OBJECT: Annotated[ - Annotated[ - Optional[LegacyJobMetadataOutObject], - AfterValidator(validate_const("job.metadata")), - ], - pydantic.Field(alias="object"), - ] = "job.metadata" + object: Optional[LegacyJobMetadataOutObject] = "job.metadata" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/classificationobject.py b/src/mistralai/models/moderationobject.py similarity index 65% rename from src/mistralai/models/classificationobject.py rename to src/mistralai/models/moderationobject.py index e4ee3624..5eff2d2a 100644 --- a/src/mistralai/models/classificationobject.py +++ b/src/mistralai/models/moderationobject.py @@ -6,16 +6,16 @@ from typing_extensions import NotRequired, TypedDict -class ClassificationObjectTypedDict(TypedDict): +class ModerationObjectTypedDict(TypedDict): categories: NotRequired[Dict[str, bool]] - r"""Classifier result thresholded""" + r"""Moderation result, thresholded""" category_scores: NotRequired[Dict[str, float]] - r"""Classifier result""" + r"""Moderation result""" -class ClassificationObject(BaseModel): +class ModerationObject(BaseModel): categories: Optional[Dict[str, bool]] = None - r"""Classifier result thresholded""" + r"""Moderation result, thresholded""" category_scores: Optional[Dict[str, float]] = None - r"""Classifier result""" + r"""Moderation result""" diff --git a/src/mistralai/models/moderationresponse.py b/src/mistralai/models/moderationresponse.py new file mode 100644 index 00000000..ed13cd6b --- /dev/null +++ b/src/mistralai/models/moderationresponse.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .moderationobject import ModerationObject, ModerationObjectTypedDict +from mistralai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ModerationResponseTypedDict(TypedDict): + id: str + model: str + results: List[ModerationObjectTypedDict] + + +class ModerationResponse(BaseModel): + id: str + + model: str + + results: List[ModerationObject] diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py index 6540df1f..6b2f730d 100644 --- a/src/mistralai/models/unarchiveftmodelout.py +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict UnarchiveFTModelOutObject = Literal["model"] @@ -14,18 +11,13 @@ class UnarchiveFTModelOutTypedDict(TypedDict): id: str - object: UnarchiveFTModelOutObject + object: NotRequired[UnarchiveFTModelOutObject] archived: NotRequired[bool] class UnarchiveFTModelOut(BaseModel): id: str - OBJECT: Annotated[ - Annotated[ - Optional[UnarchiveFTModelOutObject], AfterValidator(validate_const("model")) - ], - pydantic.Field(alias="object"), - ] = "model" + object: Optional[UnarchiveFTModelOutObject] = "model" archived: Optional[bool] = False diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index d82f921a..2bafc035 100644 --- a/src/mistralai/models/wandbintegration.py +++ b/src/mistralai/models/wandbintegration.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const 
-import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict WandbIntegrationType = Literal["wandb"] @@ -18,7 +15,7 @@ class WandbIntegrationTypedDict(TypedDict): r"""The name of the project that the new run will be created under.""" api_key: str r"""The WandB API key to use for authentication.""" - type: WandbIntegrationType + type: NotRequired[WandbIntegrationType] name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -31,12 +28,7 @@ class WandbIntegration(BaseModel): api_key: str r"""The WandB API key to use for authentication.""" - TYPE: Annotated[ - Annotated[ - Optional[WandbIntegrationType], AfterValidator(validate_const("wandb")) - ], - pydantic.Field(alias="type"), - ] = "wandb" + type: Optional[WandbIntegrationType] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index 5514b595..bb624bd8 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict WandbIntegrationOutType = Literal["wandb"] @@ -16,32 +13,30 @@ class WandbIntegrationOutTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" - type: WandbIntegrationOutType + type: NotRequired[WandbIntegrationOutType] name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] + url: NotRequired[Nullable[str]] class WandbIntegrationOut(BaseModel): project: str r"""The name of the project that the new run will be created under.""" - TYPE: Annotated[ - Annotated[ - Optional[WandbIntegrationOutType], AfterValidator(validate_const("wandb")) - ], - pydantic.Field(alias="type"), - ] = "wandb" + type: Optional[WandbIntegrationOutType] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" run_name: OptionalNullable[str] = UNSET + url: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name"] - nullable_fields = ["name", "run_name"] + optional_fields = ["type", "name", "run_name", "url"] + nullable_fields = ["name", "run_name", "url"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 0b04694d..96aab468 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -607,7 +607,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FTModelOut: + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: r"""Update Fine Tuned Model Update a model name or description. @@ -680,7 +680,10 @@ def update( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FTModelOut) + return utils.unmarshal_json( + http_res.text, + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -711,7 +714,7 @@ async def update_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FTModelOut: + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: r"""Update Fine Tuned Model Update a model name or description. 
@@ -784,7 +787,10 @@ async def update_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FTModelOut) + return utils.unmarshal_json( + http_res.text, + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( From a4f2b562fd59606de403de2dbbeb736712e926ee Mon Sep 17 00:00:00 2001 From: gaspardBT Date: Wed, 16 Apr 2025 20:38:46 +0200 Subject: [PATCH 2/3] update examples --- examples/async_classifier.py | 98 +++++++++++++++++++ examples/async_files.py | 2 +- examples/async_jobs.py | 8 +- examples/async_jobs_chat.py | 27 +++-- examples/dry_run_job.py | 6 +- examples/files.py | 2 +- examples/fixtures/classifier_sentiments.jsonl | 33 +++++++ .../ft_training_file.jsonl} | 0 .../ft_validation_file.jsonl} | 0 examples/jobs.py | 8 +- 10 files changed, 162 insertions(+), 22 deletions(-) create mode 100644 examples/async_classifier.py create mode 100644 examples/fixtures/classifier_sentiments.jsonl rename examples/{file.jsonl => fixtures/ft_training_file.jsonl} (100%) rename examples/{validation_file.jsonl => fixtures/ft_validation_file.jsonl} (100%) diff --git a/examples/async_classifier.py b/examples/async_classifier.py new file mode 100644 index 00000000..10c8bb76 --- /dev/null +++ b/examples/async_classifier.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python + +from pprint import pprint +import asyncio +from mistralai import Mistral, TrainingFile, ClassifierTrainingParametersIn + +import os + + +async def upload_files(client: Mistral, file_names: list[str]) -> list[str]: + # Upload files + print("Uploading files...") + + file_ids = [] + for file_name in file_names: + with open(file_name, "rb") as file: + f = await client.files.upload_async( + file={ + "file_name": file_name, + "content": file.read(), + }, + purpose="fine-tune", + ) + file_ids.append(f.id) + print("Files 
uploaded...") + return file_ids + + +async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: + print("Creating job...") + job = await client.fine_tuning.jobs.create_async( + model="ministral-3b-latest", + job_type="classifier", + training_files=[ + TrainingFile(file_id=training_file_id) + for training_file_id in training_file_ids + ], + hyperparameters=ClassifierTrainingParametersIn( + learning_rate=0.0001, + ), + auto_start=True, + ) + + print(f"Job created ({job.id})") + + i = 1 + while True: + await asyncio.sleep(10) + detailed_job = await client.fine_tuning.jobs.get_async(job_id=job.id) + if detailed_job.status not in [ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + ]: + break + print(f"Still training after {i * 10} seconds") + i += 1 + + if detailed_job.status != "SUCCESS": + print("Training failed") + raise Exception(f"Job failed {detailed_job.status}") + + print(f"Training succeed: {detailed_job.fine_tuned_model}") + + return detailed_job.fine_tuned_model + + +async def main(): + training_files = ["./examples/fixtures/classifier_sentiments.jsonl"] + client = Mistral( + api_key=os.environ["MISTRAL_API_KEY"], + ) + + training_file_ids: list[str] = await upload_files(client=client, file_names=training_files) + model_name: str | None = await train_classifier(client=client,training_file_ids=training_file_ids) + + if model_name: + print("Calling inference...") + response = client.classifiers.classify( + model=model_name, + inputs=["It's nice", "It's terrible", "Why not"], + ) + print("Inference succeed !") + pprint(response) + + print("Calling inference (Chat)...") + response = client.classifiers.classify_chat( + model=model_name, + inputs={"messages": [{"role": "user", "content": "Lame..."}]}, + ) + print("Inference succeed (Chat)!") + pprint(response) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_files.py b/examples/async_files.py index 64c99484..4dc21542 100644 --- 
a/examples/async_files.py +++ b/examples/async_files.py @@ -16,7 +16,7 @@ async def main(): created_file = await client.files.upload_async( file=File( file_name="training_file.jsonl", - content=open("examples/file.jsonl", "rb").read(), + content=open("examples/fixtures/ft_training_file.jsonl", "rb").read(), ) ) print(created_file) diff --git a/examples/async_jobs.py b/examples/async_jobs.py index b1f9e3bf..44a58af1 100644 --- a/examples/async_jobs.py +++ b/examples/async_jobs.py @@ -4,7 +4,7 @@ import os from mistralai import Mistral -from mistralai.models import File, TrainingParametersIn +from mistralai.models import File, CompletionTrainingParametersIn async def main(): @@ -13,11 +13,11 @@ async def main(): client = Mistral(api_key=api_key) # Create new files - with open("examples/file.jsonl", "rb") as f: + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: training_file = await client.files.upload_async( file=File(file_name="file.jsonl", content=f) ) - with open("examples/validation_file.jsonl", "rb") as f: + with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: validation_file = await client.files.upload_async( file=File(file_name="validation_file.jsonl", content=f) ) @@ -27,7 +27,7 @@ async def main(): model="open-mistral-7b", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParametersIn( + hyperparameters=CompletionTrainingParametersIn( training_steps=1, learning_rate=0.0001, ), diff --git a/examples/async_jobs_chat.py b/examples/async_jobs_chat.py index 7e0d0577..84327b32 100644 --- a/examples/async_jobs_chat.py +++ b/examples/async_jobs_chat.py @@ -4,7 +4,10 @@ import os from mistralai import Mistral -from mistralai.models import File, TrainingParametersIn +from mistralai.models import ( + File, + CompletionTrainingParametersIn, +) POLLING_INTERVAL = 10 @@ -14,11 +17,11 @@ async def main(): client = Mistral(api_key=api_key) # Create new files - with 
open("examples/file.jsonl", "rb") as f: + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: training_file = await client.files.upload_async( file=File(file_name="file.jsonl", content=f) ) - with open("examples/validation_file.jsonl", "rb") as f: + with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: validation_file = await client.files.upload_async( file=File(file_name="validation_file.jsonl", content=f) ) @@ -27,22 +30,28 @@ async def main(): model="open-mistral-7b", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParametersIn( - training_steps=1, + hyperparameters=CompletionTrainingParametersIn( + training_steps=2, learning_rate=0.0001, ), ) print(created_job) - while created_job.status in ["RUNNING", "QUEUED"]: + while created_job.status in [ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + ]: created_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) print(f"Job is {created_job.status}, waiting {POLLING_INTERVAL} seconds") await asyncio.sleep(POLLING_INTERVAL) - if created_job.status == "FAILED": + if created_job.status != "SUCCESS": print("Job failed") - return - + raise Exception(f"Job failed with {created_job.status}") + print(created_job) # Chat with model response = await client.chat.complete_async( model=created_job.fine_tuned_model, diff --git a/examples/dry_run_job.py b/examples/dry_run_job.py index 3c2a6369..84a2d0ce 100644 --- a/examples/dry_run_job.py +++ b/examples/dry_run_job.py @@ -4,7 +4,7 @@ import os from mistralai import Mistral -from mistralai.models import TrainingParametersIn +from mistralai.models import CompletionTrainingParametersIn async def main(): @@ -13,7 +13,7 @@ async def main(): client = Mistral(api_key=api_key) # Create new files - with open("examples/file.jsonl", "rb") as f: + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: training_file = await 
client.files.upload_async( file={"file_name": "test-file.jsonl", "content": f} ) @@ -22,7 +22,7 @@ async def main(): dry_run_job = await client.fine_tuning.jobs.create_async( model="open-mistral-7b", training_files=[{"file_id": training_file.id, "weight": 1}], - hyperparameters=TrainingParametersIn( + hyperparameters=CompletionTrainingParametersIn( training_steps=1, learning_rate=0.0001, warmup_fraction=0.01, diff --git a/examples/files.py b/examples/files.py index a10fd031..5dce880b 100644 --- a/examples/files.py +++ b/examples/files.py @@ -15,7 +15,7 @@ def main(): created_file = client.files.upload( file=File( file_name="training_file.jsonl", - content=open("examples/file.jsonl", "rb").read(), + content=open("examples/fixtures/ft_training_file.jsonl", "rb").read(), ) ) print(created_file) diff --git a/examples/fixtures/classifier_sentiments.jsonl b/examples/fixtures/classifier_sentiments.jsonl new file mode 100644 index 00000000..e5507198 --- /dev/null +++ b/examples/fixtures/classifier_sentiments.jsonl @@ -0,0 +1,33 @@ +{"text": "I love this product!", "labels": {"sentiment": "positive"}} +{"text": "The game was amazing.", "labels": {"sentiment": "positive"}} +{"text": "The new policy is controversial.", "labels": {"sentiment": "neutral"}} +{"text": "I don't like the new design.", "labels": {"sentiment": "negative"}} +{"text": "The team won the championship.", "labels": {"sentiment": "positive"}} +{"text": "The economy is in a bad shape.", "labels": {"sentiment": "negative"}} +{"text": "The weather is nice today.", "labels": {"sentiment": "positive"}} +{"text": "The match ended in a draw.", "labels": {"sentiment": "neutral"}} +{"text": "The new law will be implemented soon.", "labels": {"sentiment": "neutral"}} +{"text": "I had a great time at the concert.", "labels": {"sentiment": "positive"}} +{"text": "This movie was fantastic!", "labels": {"sentiment": "positive"}} +{"text": "The service was terrible.", "labels": {"sentiment": "negative"}} +{"text": "The 
food was delicious.", "labels": {"sentiment": "positive"}} +{"text": "I'm not sure about this decision.", "labels": {"sentiment": "neutral"}} +{"text": "The book was boring.", "labels": {"sentiment": "negative"}} +{"text": "The view from the top was breathtaking.", "labels": {"sentiment": "positive"}} +{"text": "The traffic was awful today.", "labels": {"sentiment": "negative"}} +{"text": "The event was well-organized.", "labels": {"sentiment": "positive"}} +{"text": "The meeting went on for too long.", "labels": {"sentiment": "negative"}} +{"text": "The presentation was informative.", "labels": {"sentiment": "positive"}} +{"text": "The new software update is buggy.", "labels": {"sentiment": "negative"}} +{"text": "The concert was sold out.", "labels": {"sentiment": "positive"}} +{"text": "The weather forecast is unreliable.", "labels": {"sentiment": "negative"}} +{"text": "The new phone is expensive.", "labels": {"sentiment": "neutral"}} +{"text": "The customer service was excellent.", "labels": {"sentiment": "positive"}} +{"text": "The new restaurant opened today.", "labels": {"sentiment": "neutral"}} +{"text": "The movie had a surprising ending.", "labels": {"sentiment": "positive"}} +{"text": "The project deadline is approaching.", "labels": {"sentiment": "neutral"}} +{"text": "The team is working hard.", "labels": {"sentiment": "positive"}} +{"text": "The new product launch was successful.", "labels": {"sentiment": "positive"}} +{"text": "The conference was insightful.", "labels": {"sentiment": "positive"}} +{"text": "The flight was delayed.", "labels": {"sentiment": "negative"}} +{"text": "The vacation was relaxing.", "labels": {"sentiment": "positive"}} \ No newline at end of file diff --git a/examples/file.jsonl b/examples/fixtures/ft_training_file.jsonl similarity index 100% rename from examples/file.jsonl rename to examples/fixtures/ft_training_file.jsonl diff --git a/examples/validation_file.jsonl b/examples/fixtures/ft_validation_file.jsonl similarity 
index 100% rename from examples/validation_file.jsonl rename to examples/fixtures/ft_validation_file.jsonl diff --git a/examples/jobs.py b/examples/jobs.py index 246edace..f65fda8e 100644 --- a/examples/jobs.py +++ b/examples/jobs.py @@ -2,7 +2,7 @@ import os from mistralai import Mistral -from mistralai.models import File, TrainingParametersIn +from mistralai.models import File, CompletionTrainingParametersIn def main(): @@ -11,11 +11,11 @@ def main(): client = Mistral(api_key=api_key) # Create new files - with open("examples/file.jsonl", "rb") as f: + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: training_file = client.files.upload( file=File(file_name="file.jsonl", content=f) ) - with open("examples/validation_file.jsonl", "rb") as f: + with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: validation_file = client.files.upload( file=File(file_name="validation_file.jsonl", content=f) ) @@ -25,7 +25,7 @@ def main(): model="open-mistral-7b", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParametersIn( + hyperparameters=CompletionTrainingParametersIn( training_steps=1, learning_rate=0.0001, ), From a333d52be068a71fc6e0c9d9867da0c4aa2a8bde Mon Sep 17 00:00:00 2001 From: gaspardBT Date: Wed, 16 Apr 2025 21:16:51 +0200 Subject: [PATCH 3/3] fix inputs --- .speakeasy/gen.lock | 10 ++-- .speakeasy/workflow.lock | 11 ++-- docs/models/chatclassificationrequest.md | 8 +-- .../models/chatclassificationrequestinputs.md | 19 ------- docs/models/embeddingrequest.md | 8 +-- docs/models/embeddingrequestinputs.md | 19 +++++++ docs/models/inputs.md | 10 ++-- docs/models/instructrequestinputs.md | 8 +++ docs/models/instructrequestinputsmessages.md | 29 ++++++++++ docs/sdks/classifiers/README.md | 23 ++++---- docs/sdks/embeddings/README.md | 10 ++-- src/mistralai/classifiers.py | 18 ++----- src/mistralai/embeddings.py | 8 ++- src/mistralai/models/__init__.py | 24 ++++++--- 
.../models/chatclassificationrequest.py | 12 ++--- .../models/chatclassificationrequestinputs.py | 19 ------- src/mistralai/models/embeddingrequest.py | 10 ++-- src/mistralai/models/inputs.py | 54 +++++++++++++++++++ 18 files changed, 188 insertions(+), 112 deletions(-) delete mode 100644 docs/models/chatclassificationrequestinputs.md create mode 100644 docs/models/embeddingrequestinputs.md create mode 100644 docs/models/instructrequestinputs.md create mode 100644 docs/models/instructrequestinputsmessages.md delete mode 100644 src/mistralai/models/chatclassificationrequestinputs.py create mode 100644 src/mistralai/models/inputs.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 8f91267d..bc731e9a 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,7 +1,7 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 2c14111bcd5cc5624575faefc3e5ddae + docChecksum: 63f1a973632e9afab0da3d2498994c1b docVersion: 0.0.2 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 @@ -69,7 +69,6 @@ generatedFiles: - docs/models/batchjobsoutobject.md - docs/models/batchjobstatus.md - docs/models/chatclassificationrequest.md - - docs/models/chatclassificationrequestinputs.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionrequest.md - docs/models/chatcompletionrequesttoolchoice.md @@ -128,6 +127,7 @@ generatedFiles: - docs/models/documenturlchunk.md - docs/models/documenturlchunktype.md - docs/models/embeddingrequest.md + - docs/models/embeddingrequestinputs.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md - docs/models/eventout.md @@ -167,6 +167,8 @@ generatedFiles: - docs/models/imageurlchunktype.md - docs/models/inputs.md - docs/models/instructrequest.md + - docs/models/instructrequestinputs.md + - docs/models/instructrequestinputsmessages.md - docs/models/instructrequestmessages.md - docs/models/integrations.md - docs/models/jobin.md @@ -306,7 +308,6 @@ generatedFiles: - 
src/mistralai/models/batchjobsout.py - src/mistralai/models/batchjobstatus.py - src/mistralai/models/chatclassificationrequest.py - - src/mistralai/models/chatclassificationrequestinputs.py - src/mistralai/models/chatcompletionchoice.py - src/mistralai/models/chatcompletionrequest.py - src/mistralai/models/chatcompletionresponse.py @@ -365,6 +366,7 @@ generatedFiles: - src/mistralai/models/httpvalidationerror.py - src/mistralai/models/imageurl.py - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/inputs.py - src/mistralai/models/instructrequest.py - src/mistralai/models/jobin.py - src/mistralai/models/jobmetadataout.py @@ -727,7 +729,7 @@ examples: chat_classifications_v1_chat_classifications_post: speakeasy-default-chat-classifications-v1-chat-classifications-post: requestBody: - application/json: {"model": "Fortwo", "inputs": [{"messages": [{"prefix": false, "role": "assistant"}, {"prefix": false, "role": "assistant"}]}]} + application/json: {"model": "Fortwo", "input": [{"messages": [{"content": "", "role": "tool"}]}, {"messages": []}]} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "CX-9", "results": [{"key": {"scores": {"key": 4386.53, "key1": 2974.85}}, "key1": {"scores": {"key": 7100.52, "key1": 480.47}}}]} diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 1b29132d..5c57d996 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,10 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:2bf3d26638f594c87cbc903f32b1d5c101d01bca4b92a63bb8ce3dd9c3bf49e6 - sourceBlobDigest: sha256:f395b0bda941385b5f2782ffba1261bfad5730f4975dcb6ff71592ae34662c25 + sourceRevisionDigest: sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e + sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 tags: - latest - - speakeasy-sdk-regen-1744819913 targets: mistralai-azure-sdk: 
source: mistral-azure-source @@ -37,10 +36,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:2bf3d26638f594c87cbc903f32b1d5c101d01bca4b92a63bb8ce3dd9c3bf49e6 - sourceBlobDigest: sha256:f395b0bda941385b5f2782ffba1261bfad5730f4975dcb6ff71592ae34662c25 + sourceRevisionDigest: sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e + sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:4d37afd772178799966e64c0a4b19b48e689ce1e235a8902be7eed5ffe8dad58 + codeSamplesRevisionDigest: sha256:7c657301f482932fca0a3e914d3c25820ebb7e535e1887daea3cd9240eca0444 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md index 53a05601..910d62ae 100644 --- a/docs/models/chatclassificationrequest.md +++ b/docs/models/chatclassificationrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `inputs` | [models.ChatClassificationRequestInputs](../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.Inputs](../models/inputs.md) | 
:heavy_check_mark: | Chat to classify | \ No newline at end of file diff --git a/docs/models/chatclassificationrequestinputs.md b/docs/models/chatclassificationrequestinputs.md deleted file mode 100644 index 18214f0b..00000000 --- a/docs/models/chatclassificationrequestinputs.md +++ /dev/null @@ -1,19 +0,0 @@ -# ChatClassificationRequestInputs - -Chat to classify - - -## Supported Types - -### `models.InstructRequest` - -```python -value: models.InstructRequest = /* values here */ -``` - -### `List[models.InstructRequest]` - -```python -value: List[models.InstructRequest] = /* values here */ -``` - diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 3bdd79e8..242bb3e3 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | Example | -| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | \ No newline at end of file diff --git a/docs/models/embeddingrequestinputs.md b/docs/models/embeddingrequestinputs.md new file mode 100644 index 00000000..a3f82c1c --- /dev/null +++ b/docs/models/embeddingrequestinputs.md @@ -0,0 +1,19 @@ +# EmbeddingRequestInputs + +Text to embed. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/inputs.md b/docs/models/inputs.md index 45264f9e..0f62a7ce 100644 --- a/docs/models/inputs.md +++ b/docs/models/inputs.md @@ -1,19 +1,19 @@ # Inputs -Text to embed. +Chat to classify ## Supported Types -### `str` +### `models.InstructRequestInputs` ```python -value: str = /* values here */ +value: models.InstructRequestInputs = /* values here */ ``` -### `List[str]` +### `List[models.InstructRequest]` ```python -value: List[str] = /* values here */ +value: List[models.InstructRequest] = /* values here */ ``` diff --git a/docs/models/instructrequestinputs.md b/docs/models/instructrequestinputs.md new file mode 100644 index 00000000..4caa028f --- /dev/null +++ b/docs/models/instructrequestinputs.md @@ -0,0 +1,8 @@ +# InstructRequestInputs + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `messages` | List[[models.InstructRequestInputsMessages](../models/instructrequestinputsmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/instructrequestinputsmessages.md b/docs/models/instructrequestinputsmessages.md new file mode 100644 index 00000000..237e131f --- /dev/null +++ 
b/docs/models/instructrequestinputsmessages.md @@ -0,0 +1,29 @@ +# InstructRequestInputsMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 12d472e0..19761046 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -202,15 +202,16 @@ with Mistral( { "messages": [ { - "prefix": False, - "role": "assistant", - }, - { - "prefix": False, - "role": "assistant", + "content": "", + "role": "tool", }, ], }, + { + "messages": [ + + ], + }, ]) # Handle response @@ -220,11 +221,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `inputs` | [models.ChatClassificationRequestInputs](../../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Chat to classify | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 8c386439..d55b38fb 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -36,11 +36,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index 91be11c1..e5b46f5d 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -651,10 +651,7 @@ def classify_chat( self, *, model: str, - inputs: Union[ - models.ChatClassificationRequestInputs, - models.ChatClassificationRequestInputsTypedDict, - ], + inputs: Union[models.Inputs, models.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -681,9 +678,7 @@ def classify_chat( request = models.ChatClassificationRequest( model=model, - inputs=utils.get_pydantic_model( - inputs, models.ChatClassificationRequestInputs - ), + inputs=utils.get_pydantic_model(inputs, models.Inputs), ) req = self._build_request( @@ -759,10 +754,7 @@ async def classify_chat_async( self, *, model: str, - inputs: Union[ - models.ChatClassificationRequestInputs, - models.ChatClassificationRequestInputsTypedDict, - ], + inputs: Union[models.Inputs, models.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -789,9 +781,7 @@ async def classify_chat_async( request = models.ChatClassificationRequest( model=model, - inputs=utils.get_pydantic_model( - inputs, models.ChatClassificationRequestInputs - ), + inputs=utils.get_pydantic_model(inputs, models.Inputs), ) req = self._build_request_async( diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index b99ff0cf..b81a5e37 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -15,7 +15,9 @@ def create( self, *, model: str, - inputs: Union[models.Inputs, models.InputsTypedDict], + inputs: Union[ + models.EmbeddingRequestInputs, 
models.EmbeddingRequestInputsTypedDict + ], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -120,7 +122,9 @@ async def create_async( self, *, model: str, - inputs: Union[models.Inputs, models.InputsTypedDict], + inputs: Union[ + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict + ], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index fa756356..d56f5bf8 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -43,10 +43,6 @@ ChatClassificationRequest, ChatClassificationRequestTypedDict, ) -from .chatclassificationrequestinputs import ( - ChatClassificationRequestInputs, - ChatClassificationRequestInputsTypedDict, -) from .chatcompletionchoice import ( ChatCompletionChoice, ChatCompletionChoiceTypedDict, @@ -193,9 +189,9 @@ ) from .embeddingrequest import ( EmbeddingRequest, + EmbeddingRequestInputs, + EmbeddingRequestInputsTypedDict, EmbeddingRequestTypedDict, - Inputs, - InputsTypedDict, ) from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict @@ -276,6 +272,14 @@ ImageURLChunkType, ImageURLChunkTypedDict, ) +from .inputs import ( + Inputs, + InputsTypedDict, + InstructRequestInputs, + InstructRequestInputsMessages, + InstructRequestInputsMessagesTypedDict, + InstructRequestInputsTypedDict, +) from .instructrequest import ( InstructRequest, InstructRequestMessages, @@ -484,8 +488,6 @@ "BatchJobsOutObject", "BatchJobsOutTypedDict", "ChatClassificationRequest", - "ChatClassificationRequestInputs", - "ChatClassificationRequestInputsTypedDict", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", @@ -588,6 +590,8 @@ "DocumentURLChunkType", 
"DocumentURLChunkTypedDict", "EmbeddingRequest", + "EmbeddingRequestInputs", + "EmbeddingRequestInputsTypedDict", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", @@ -658,6 +662,10 @@ "Inputs", "InputsTypedDict", "InstructRequest", + "InstructRequestInputs", + "InstructRequestInputsMessages", + "InstructRequestInputsMessagesTypedDict", + "InstructRequestInputsTypedDict", "InstructRequestMessages", "InstructRequestMessagesTypedDict", "InstructRequestTypedDict", diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py index b71bd2ea..f06f4f34 100644 --- a/src/mistralai/models/chatclassificationrequest.py +++ b/src/mistralai/models/chatclassificationrequest.py @@ -1,22 +1,20 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .chatclassificationrequestinputs import ( - ChatClassificationRequestInputs, - ChatClassificationRequestInputsTypedDict, -) +from .inputs import Inputs, InputsTypedDict from mistralai.types import BaseModel -from typing_extensions import TypedDict +import pydantic +from typing_extensions import Annotated, TypedDict class ChatClassificationRequestTypedDict(TypedDict): model: str - inputs: ChatClassificationRequestInputsTypedDict + inputs: InputsTypedDict r"""Chat to classify""" class ChatClassificationRequest(BaseModel): model: str - inputs: ChatClassificationRequestInputs + inputs: Annotated[Inputs, pydantic.Field(alias="input")] r"""Chat to classify""" diff --git a/src/mistralai/models/chatclassificationrequestinputs.py b/src/mistralai/models/chatclassificationrequestinputs.py deleted file mode 100644 index 86a2699a..00000000 --- a/src/mistralai/models/chatclassificationrequestinputs.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .instructrequest import InstructRequest, InstructRequestTypedDict -from typing import List, Union -from typing_extensions import TypeAliasType - - -ChatClassificationRequestInputsTypedDict = TypeAliasType( - "ChatClassificationRequestInputsTypedDict", - Union[InstructRequestTypedDict, List[InstructRequestTypedDict]], -) -r"""Chat to classify""" - - -ChatClassificationRequestInputs = TypeAliasType( - "ChatClassificationRequestInputs", Union[InstructRequest, List[InstructRequest]] -) -r"""Chat to classify""" diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 5c37fd48..bf9ce3ff 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -7,18 +7,20 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -InputsTypedDict = TypeAliasType("InputsTypedDict", Union[str, List[str]]) +EmbeddingRequestInputsTypedDict = TypeAliasType( + "EmbeddingRequestInputsTypedDict", Union[str, List[str]] +) r"""Text to embed.""" -Inputs = TypeAliasType("Inputs", Union[str, List[str]]) +EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) r"""Text to embed.""" class EmbeddingRequestTypedDict(TypedDict): model: str r"""ID of the model to use.""" - inputs: InputsTypedDict + inputs: EmbeddingRequestInputsTypedDict r"""Text to embed.""" @@ -26,5 +28,5 @@ class EmbeddingRequest(BaseModel): model: str r"""ID of the model to use.""" - inputs: Annotated[Inputs, pydantic.Field(alias="input")] + inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] r"""Text to embed.""" diff --git a/src/mistralai/models/inputs.py b/src/mistralai/models/inputs.py new file mode 100644 index 00000000..34d20f34 --- /dev/null +++ b/src/mistralai/models/inputs.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .instructrequest import InstructRequest, InstructRequestTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestInputsMessagesTypedDict = TypeAliasType( + "InstructRequestInputsMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestInputsMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestInputsTypedDict(TypedDict): + messages: List[InstructRequestInputsMessagesTypedDict] + + +class InstructRequestInputs(BaseModel): + messages: List[InstructRequestInputsMessages] + + +InputsTypedDict = TypeAliasType( + "InputsTypedDict", + Union[InstructRequestInputsTypedDict, List[InstructRequestTypedDict]], +) +r"""Chat to classify""" + + +Inputs = TypeAliasType("Inputs", Union[InstructRequestInputs, List[InstructRequest]]) +r"""Chat to classify"""